* arm-tdep.c (arm_skip_prologue): Extending producer check to support LLVM compiler.
[deliverable/binutils-gdb.git] / gdb / arm-tdep.c
1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-1989, 1991-1993, 1995-1996, 1998-2012 Free
4 Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include <ctype.h> /* XXX for isupper (). */
22
23 #include "defs.h"
24 #include "frame.h"
25 #include "inferior.h"
26 #include "gdbcmd.h"
27 #include "gdbcore.h"
28 #include "gdb_string.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "regcache.h"
31 #include "reggroups.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "remote.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "record.h"
59
60 #include "features/arm-with-m.c"
61 #include "features/arm-with-m-fpa-layout.c"
62 #include "features/arm-with-m-vfp-d16.c"
63 #include "features/arm-with-iwmmxt.c"
64 #include "features/arm-with-vfpv2.c"
65 #include "features/arm-with-vfpv3.c"
66 #include "features/arm-with-neon.c"
67
68 static int arm_debug;
69
70 /* Macros for setting and testing a bit in a minimal symbol that marks
71 it as Thumb function. The MSB of the minimal symbol's "info" field
72 is used for this purpose.
73
74 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
75 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
76
77 #define MSYMBOL_SET_SPECIAL(msym) \
78 MSYMBOL_TARGET_FLAG_1 (msym) = 1
79
80 #define MSYMBOL_IS_SPECIAL(msym) \
81 MSYMBOL_TARGET_FLAG_1 (msym)
82
83 /* Per-objfile data used for mapping symbols. */
84 static const struct objfile_data *arm_objfile_data_key;
85
/* An ARM ELF mapping symbol: records the section-relative address at
   which the encoding of the following bytes changes, together with
   its type character ('t' is checked for Thumb by arm_pc_is_thumb;
   the ARM EABI also defines 'a' for ARM code and 'd' for data).  */
struct arm_mapping_symbol
{
  /* Section-relative address of the mapping symbol.  */
  bfd_vma value;
  /* Mapping symbol type character.  */
  char type;
};
typedef struct arm_mapping_symbol arm_mapping_symbol_s;
DEF_VEC_O(arm_mapping_symbol_s);
93
/* Per-objfile data holding the mapping symbols for each section.  */
struct arm_per_objfile
{
  /* One sorted vector of mapping symbols per BFD section, indexed by
     the section's index; consulted by arm_find_mapping_symbol.  */
  VEC(arm_mapping_symbol_s) **section_maps;
};
98
/* The list of available "set arm ..." and "show arm ..." commands.  */
static struct cmd_list_element *setarmcmdlist = NULL;
static struct cmd_list_element *showarmcmdlist = NULL;

/* The type of floating-point to use.  Keep this in sync with enum
   arm_float_model, and the help string in _initialize_arm_tdep.  */
static const char *const fp_model_strings[] =
{
  "auto",
  "softfpa",
  "fpa",
  "softvfp",
  "vfp",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
static const char *current_fp_model = "auto";

/* The ABI to use.  Keep this in sync with arm_abi_kind.  */
static const char *const arm_abi_strings[] =
{
  "auto",
  "APCS",
  "AAPCS",
  NULL
};

/* A variable that can be configured by the user.  */
static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
static const char *arm_abi_string = "auto";

/* The execution mode to assume.  */
static const char *const arm_mode_strings[] =
  {
    "auto",
    "arm",
    "thumb",
    NULL
  };

/* User-configurable execution modes; values are "auto", "arm" or
   "thumb", and both are consulted by arm_pc_is_thumb.  */
static const char *arm_fallback_mode_string = "auto";
static const char *arm_force_mode_string = "auto";

/* Internal override of the execution mode.  -1 means no override,
   0 means override to ARM mode, 1 means override to Thumb mode.
   The effect is the same as if arm_force_mode has been set by the
   user (except the internal override has precedence over a user's
   arm_force_mode override).  */
static int arm_override_mode = -1;

/* Number of different reg name sets (options).  */
static int num_disassembly_options;
153
/* The standard register names, and all the valid aliases for them.  Note
   that `fp', `sp' and `pc' are not added in this alias list, because they
   have been added as builtin user registers in
   std-regs.c:_initialize_frame_reg.  */
static const struct
{
  const char *name;
  int regnum;
} arm_register_aliases[] = {
  /* Basic register numbers.  */
  { "r0", 0 },
  { "r1", 1 },
  { "r2", 2 },
  { "r3", 3 },
  { "r4", 4 },
  { "r5", 5 },
  { "r6", 6 },
  { "r7", 7 },
  { "r8", 8 },
  { "r9", 9 },
  { "r10", 10 },
  { "r11", 11 },
  { "r12", 12 },
  { "r13", 13 },
  { "r14", 14 },
  { "r15", 15 },
  /* Synonyms (argument and variable registers).  */
  { "a1", 0 },
  { "a2", 1 },
  { "a3", 2 },
  { "a4", 3 },
  { "v1", 4 },
  { "v2", 5 },
  { "v3", 6 },
  { "v4", 7 },
  { "v5", 8 },
  { "v6", 9 },
  { "v7", 10 },
  { "v8", 11 },
  /* Other platform-specific names for r9.  */
  { "sb", 9 },
  { "tr", 9 },
  /* Special names.  */
  { "ip", 12 },
  { "lr", 14 },
  /* Names used by GCC (not listed in the ARM EABI).  */
  { "sl", 10 },
  /* A special name from the older ATPCS.  */
  { "wr", 7 },
};
204
/* Canonical names for the core, FPA and status registers, indexed by
   GDB register number (shown in the trailing comments).  */
static const char *const arm_register_names[] =
{"r0",  "r1",  "r2",  "r3",	/*  0  1  2  3 */
 "r4",  "r5",  "r6",  "r7",	/*  4  5  6  7 */
 "r8",  "r9",  "r10", "r11",	/*  8  9 10 11 */
 "r12", "sp",  "lr",  "pc",	/* 12 13 14 15 */
 "f0",  "f1",  "f2",  "f3",	/* 16 17 18 19 */
 "f4",  "f5",  "f6",  "f7",	/* 20 21 22 23 */
 "fps", "cpsr" };		/* 24 25       */
213
/* Valid register name styles.  */
static const char **valid_disassembly_styles;

/* Disassembly style to use.  Default to "std" register names.  */
static const char *disassembly_style;
219
220 /* This is used to keep the bfd arch_info in sync with the disassembly
221 style. */
222 static void set_disassembly_style_sfunc(char *, int,
223 struct cmd_list_element *);
224 static void set_disassembly_style (void);
225
226 static void convert_from_extended (const struct floatformat *, const void *,
227 void *, int);
228 static void convert_to_extended (const struct floatformat *, void *,
229 const void *, int);
230
231 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
232 struct regcache *regcache,
233 int regnum, gdb_byte *buf);
234 static void arm_neon_quad_write (struct gdbarch *gdbarch,
235 struct regcache *regcache,
236 int regnum, const gdb_byte *buf);
237
238 static int thumb_insn_size (unsigned short inst1);
239
/* Cached results of prologue analysis for one frame, shared between
   the prologue scanners and the frame unwinders.  */
struct arm_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
259
260 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
261 CORE_ADDR prologue_start,
262 CORE_ADDR prologue_end,
263 struct arm_prologue_cache *cache);
264
265 /* Architecture version for displaced stepping. This effects the behaviour of
266 certain instructions, and really should not be hard-wired. */
267
268 #define DISPLACED_STEPPING_ARCH_VERSION 5
269
/* Addresses for calling Thumb functions have the bit 0 set.
   Here are some macros to test, set, or clear bit 0 of addresses.  */
#define IS_THUMB_ADDR(addr)	 ((addr) & 1)
#define MAKE_THUMB_ADDR(addr)	 ((addr) | 1)
#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)

/* Set to true if the 32-bit mode is in use.  When zero, PC values are
   masked down to 26 bits (see arm_addr_bits_remove).  */

int arm_apcs_32 = 1;
279
280 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
281
282 int
283 arm_psr_thumb_bit (struct gdbarch *gdbarch)
284 {
285 if (gdbarch_tdep (gdbarch)->is_m)
286 return XPSR_T;
287 else
288 return CPSR_T;
289 }
290
291 /* Determine if FRAME is executing in Thumb mode. */
292
293 int
294 arm_frame_is_thumb (struct frame_info *frame)
295 {
296 CORE_ADDR cpsr;
297 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
298
299 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
300 directly (from a signal frame or dummy frame) or by interpreting
301 the saved LR (from a prologue or DWARF frame). So consult it and
302 trust the unwinders. */
303 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
304
305 return (cpsr & t_bit) != 0;
306 }
307
308 /* Callback for VEC_lower_bound. */
309
310 static inline int
311 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
312 const struct arm_mapping_symbol *rhs)
313 {
314 return lhs->value < rhs->value;
315 }
316
/* Search for the mapping symbol covering MEMADDR.  If one is found,
   return its type.  Otherwise, return 0.  If START is non-NULL,
   set *START to the location of the mapping symbol.  */

static char
arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  /* If there are mapping symbols, consult them.  */
  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_per_objfile *data;
      VEC(arm_mapping_symbol_s) *map;
      /* Mapping symbol addresses are section-relative, so convert
	 MEMADDR before searching.  */
      struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
					    0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_objfile_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_mapping_symbol_s, map))
	    {
	      struct arm_mapping_symbol *map_sym;

	      idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
				     arm_compare_mapping_symbols);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 mapping symbol covers this address.  */
	      if (idx < VEC_length (arm_mapping_symbol_s, map))
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
		  if (map_sym->value == map_key.value)
		    {
		      if (start)
			*start = map_sym->value + obj_section_addr (sec);
		      return map_sym->type;
		    }
		}

	      /* Fall back to the mapping symbol immediately preceding
		 MEMADDR, if any.  */
	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
		  if (start)
		    *start = map_sym->value + obj_section_addr (sec);
		  return map_sym->type;
		}
	    }
	}
    }

  /* No section or no mapping symbol covers MEMADDR.  */
  return 0;
}
375
/* Determine if the program counter specified in MEMADDR is in a Thumb
   function.  This function should be called for addresses unrelated to
   any executing frame; otherwise, prefer arm_frame_is_thumb.  */

int
arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
{
  struct minimal_symbol *sym;
  char type;
  struct displaced_step_closure* dsc
    = get_displaced_step_closure_by_addr(memaddr);

  /* If checking the mode of displaced instruction in copy area, the mode
     should be determined by instruction on the original address.  */
  if (dsc)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: check mode of %.8lx instead of %.8lx\n",
			    (unsigned long) dsc->insn_addr,
			    (unsigned long) memaddr);
      memaddr = dsc->insn_addr;
    }

  /* The checks below run from most to least authoritative; the first
     one that applies decides the answer.  */

  /* If bit 0 of the address is set, assume this is a Thumb address.  */
  if (IS_THUMB_ADDR (memaddr))
    return 1;

  /* Respect internal mode override if active.  */
  if (arm_override_mode != -1)
    return arm_override_mode;

  /* If the user wants to override the symbol table, let him.  */
  if (strcmp (arm_force_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_force_mode_string, "thumb") == 0)
    return 1;

  /* ARM v6-M and v7-M are always in Thumb mode.  */
  if (gdbarch_tdep (gdbarch)->is_m)
    return 1;

  /* If there are mapping symbols, consult them.  */
  type = arm_find_mapping_symbol (memaddr, NULL);
  if (type)
    return type == 't';

  /* Thumb functions have a "special" bit set in minimal symbols.  */
  sym = lookup_minimal_symbol_by_pc (memaddr);
  if (sym)
    return (MSYMBOL_IS_SPECIAL (sym));

  /* If the user wants to override the fallback mode, let them.  */
  if (strcmp (arm_fallback_mode_string, "arm") == 0)
    return 0;
  if (strcmp (arm_fallback_mode_string, "thumb") == 0)
    return 1;

  /* If we couldn't find any symbol, but we're talking to a running
     target, then trust the current value of $cpsr.  This lets
     "display/i $pc" always show the correct mode (though if there is
     a symbol table we will not reach here, so it still may not be
     displayed in the mode it will be executed).  */
  if (target_has_registers)
    return arm_frame_is_thumb (get_current_frame ());

  /* Otherwise we're out of luck; we assume ARM.  */
  return 0;
}
445
446 /* Remove useless bits from addresses in a running program. */
447 static CORE_ADDR
448 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
449 {
450 if (arm_apcs_32)
451 return UNMAKE_THUMB_ADDR (val);
452 else
453 return (val & 0x03fffffc);
454 }
455
456 /* When reading symbols, we need to zap the low bit of the address,
457 which may be set to 1 for Thumb functions. */
458 static CORE_ADDR
459 arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val)
460 {
461 return val & ~1;
462 }
463
/* Return 1 if PC is the start of a compiler helper function which
   can be safely ignored during prologue skipping.  IS_THUMB is true
   if the function is known to be a Thumb function due to the way it
   is being called.  */
static int
skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  struct minimal_symbol *msym;

  msym = lookup_minimal_symbol_by_pc (pc);
  if (msym != NULL
      && SYMBOL_VALUE_ADDRESS (msym) == pc
      && SYMBOL_LINKAGE_NAME (msym) != NULL)
    {
      const char *name = SYMBOL_LINKAGE_NAME (msym);

      /* The GNU linker's Thumb call stub to foo is named
	 __foo_from_thumb.  Skip the leading "__" so the prefix
	 comparisons below see the wrapped name.  */
      if (strstr (name, "_from_thumb") != NULL)
	name += 2;

      /* On soft-float targets, __truncdfsf2 is called to convert promoted
	 arguments to their argument types in non-prototyped
	 functions.  */
      if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
	return 1;

      /* Internal functions related to thread-local storage.  */
      if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
	return 1;
      if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
	return 1;
    }
  else
    {
      /* If we run against a stripped glibc, we may be unable to identify
	 special functions by name.  Check for one important case,
	 __aeabi_read_tp, by comparing the *code* against the default
	 implementation (this is hand-written ARM assembler in glibc).  */

      if (!is_thumb
	  && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
	     == 0xe3e00a0f /* mov r0, #0xffff0fff */
	  && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
	     == 0xe240f01f) /* sub pc, r0, #31 */
	return 1;
    }

  /* Not a recognized helper; do not skip it.  */
  return 0;
}
517
/* Support routines for instruction parsing.  */

/* Mask covering bits 0 through X inclusive.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ, right-justified.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Like bits, but sign-extended from bit FN.  */
#define sbits(obj,st,fn) \
  ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
/* Destination of an ARM branch: ADDR + 8 (pipeline offset) plus the
   sign-extended, word-scaled 24-bit offset from INSTR.  */
#define BranchDest(addr,instr) \
  ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))

/* Extract the immediate from instruction movw/movt of encoding T.  INSN1 is
   the first 16-bit of instruction, and INSN2 is the second 16-bit of
   instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
  ((bits ((insn1), 0, 3) << 12)               \
   | (bits ((insn1), 10, 10) << 11)           \
   | (bits ((insn2), 12, 14) << 8)            \
   | bits ((insn2), 0, 7))

/* Extract the immediate from instruction movw/movt of encoding A.  INSN is
   the 32-bit instruction.  */
#define EXTRACT_MOVW_MOVT_IMM_A(insn) \
  ((bits ((insn), 16, 19) << 12)      \
   | bits ((insn), 0, 11))
541
/* Decode immediate value; implements ThumbExpandImmediate pseudo-op.  */

static unsigned int
thumb_expand_immediate (unsigned int imm)
{
  unsigned int rot = imm >> 7;		/* Bits 11:7 of the encoding.  */
  unsigned int byte = imm & 0xff;	/* The replicated low byte.  */

  if (rot >= 8)
    /* An 8-bit value 1bcdefgh rotated right by ROT places.  ROT is at
       most 31, so the rotation never wraps and a left shift by
       (32 - ROT) is equivalent.  */
    return (0x80 | (imm & 0x7f)) << (32 - rot);

  /* Otherwise bits 9:8 select a replication pattern for the byte.  */
  switch ((imm >> 8) & 3)
    {
    case 0:	/* 00000000 00000000 00000000 abcdefgh  */
      return byte;
    case 1:	/* 00000000 abcdefgh 00000000 abcdefgh  */
      return byte | (byte << 16);
    case 2:	/* abcdefgh 00000000 abcdefgh 00000000  */
      return (byte << 8) | (byte << 24);
    default:	/* abcdefgh abcdefgh abcdefgh abcdefgh  */
      return byte | (byte << 8) | (byte << 16) | (byte << 24);
    }
}
565
/* Return 1 if the 16-bit Thumb instruction INST might change
   control flow, 0 otherwise.  */

static int
thumb_instruction_changes_pc (unsigned short inst)
{
  return ((inst & 0xff00) == 0xbd00	/* pop {rlist, pc} */
	  || (inst & 0xf000) == 0xd000	/* conditional branch */
	  || (inst & 0xf800) == 0xe000	/* unconditional branch */
	  || (inst & 0xff00) == 0x4700	/* bx REG, blx REG */
	  || (inst & 0xff87) == 0x4687	/* mov pc, REG */
	  || (inst & 0xf500) == 0xb100);	/* CBNZ or CBZ.  */
}
592
/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
   might change control flow, 0 otherwise.  INST1 is the first
   halfword and INST2 the second.  */

static int
thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
{
  if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
    {
      /* Branches and miscellaneous control instructions.  */

      if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	{
	  /* B, BL, BLX.  */
	  return 1;
	}
      else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	{
	  /* SUBS PC, LR, #imm8.  */
	  return 1;
	}
      else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	{
	  /* Conditional branch.  */
	  return 1;
	}

      /* Other miscellaneous control instructions do not branch.  */
      return 0;
    }

  if ((inst1 & 0xfe50) == 0xe810)
    {
      /* Load multiple or RFE.  Bits 7 and 8 of INST1 select the
	 addressing mode; a load multiple only changes the PC when
	 its register list (INST2) includes r15.  */

      if (bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* LDMIA or POP */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (!bit (inst1, 7) && bit (inst1, 8))
	{
	  /* LDMDB */
	  if (bit (inst2, 15))
	    return 1;
	}
      else if (bit (inst1, 7) && bit (inst1, 8))
	{
	  /* RFEIA */
	  return 1;
	}
      else if (!bit (inst1, 7) && !bit (inst1, 8))
	{
	  /* RFEDB */
	  return 1;
	}

      return 0;
    }

  if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
    {
      /* MOV PC or MOVS PC.  */
      return 1;
    }

  if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
    {
      /* LDR PC.  Only some addressing modes are valid; the rest are
	 undefined encodings and treated as not changing the PC.  */
      if (bits (inst1, 0, 3) == 15)
	return 1;
      if (bit (inst1, 7))
	return 1;
      if (bit (inst2, 11))
	return 1;
      if ((inst2 & 0x0fc0) == 0x0000)
	return 1;

      return 0;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
    {
      /* TBB.  */
      return 1;
    }

  if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
    {
      /* TBH.  */
      return 1;
    }

  return 0;
}
687
688 /* Analyze a Thumb prologue, looking for a recognizable stack frame
689 and frame pointer. Scan until we encounter a store that could
690 clobber the stack frame unexpectedly, or an unknown instruction.
691 Return the last address which is definitely safe to skip for an
692 initial breakpoint. */
693
694 static CORE_ADDR
695 thumb_analyze_prologue (struct gdbarch *gdbarch,
696 CORE_ADDR start, CORE_ADDR limit,
697 struct arm_prologue_cache *cache)
698 {
699 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
700 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
701 int i;
702 pv_t regs[16];
703 struct pv_area *stack;
704 struct cleanup *back_to;
705 CORE_ADDR offset;
706 CORE_ADDR unrecognized_pc = 0;
707
708 for (i = 0; i < 16; i++)
709 regs[i] = pv_register (i, 0);
710 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
711 back_to = make_cleanup_free_pv_area (stack);
712
713 while (start < limit)
714 {
715 unsigned short insn;
716
717 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
718
719 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
720 {
721 int regno;
722 int mask;
723
724 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
725 break;
726
727 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
728 whether to save LR (R14). */
729 mask = (insn & 0xff) | ((insn & 0x100) << 6);
730
731 /* Calculate offsets of saved R0-R7 and LR. */
732 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
733 if (mask & (1 << regno))
734 {
735 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
736 -4);
737 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
738 }
739 }
740 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
741 sub sp, #simm */
742 {
743 offset = (insn & 0x7f) << 2; /* get scaled offset */
744 if (insn & 0x80) /* Check for SUB. */
745 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
746 -offset);
747 else
748 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
749 offset);
750 }
751 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
752 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
753 (insn & 0xff) << 2);
754 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
755 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
756 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
757 bits (insn, 6, 8));
758 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
759 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
760 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
761 bits (insn, 0, 7));
762 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
763 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
764 && pv_is_constant (regs[bits (insn, 3, 5)]))
765 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
766 regs[bits (insn, 6, 8)]);
767 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
768 && pv_is_constant (regs[bits (insn, 3, 6)]))
769 {
770 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
771 int rm = bits (insn, 3, 6);
772 regs[rd] = pv_add (regs[rd], regs[rm]);
773 }
774 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
775 {
776 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
777 int src_reg = (insn & 0x78) >> 3;
778 regs[dst_reg] = regs[src_reg];
779 }
780 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
781 {
782 /* Handle stores to the stack. Normally pushes are used,
783 but with GCC -mtpcs-frame, there may be other stores
784 in the prologue to create the frame. */
785 int regno = (insn >> 8) & 0x7;
786 pv_t addr;
787
788 offset = (insn & 0xff) << 2;
789 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
790
791 if (pv_area_store_would_trash (stack, addr))
792 break;
793
794 pv_area_store (stack, addr, 4, regs[regno]);
795 }
796 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
797 {
798 int rd = bits (insn, 0, 2);
799 int rn = bits (insn, 3, 5);
800 pv_t addr;
801
802 offset = bits (insn, 6, 10) << 2;
803 addr = pv_add_constant (regs[rn], offset);
804
805 if (pv_area_store_would_trash (stack, addr))
806 break;
807
808 pv_area_store (stack, addr, 4, regs[rd]);
809 }
810 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
811 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
812 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
813 /* Ignore stores of argument registers to the stack. */
814 ;
815 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
816 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
817 /* Ignore block loads from the stack, potentially copying
818 parameters from memory. */
819 ;
820 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
821 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
822 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
823 /* Similarly ignore single loads from the stack. */
824 ;
825 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
826 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
827 /* Skip register copies, i.e. saves to another register
828 instead of the stack. */
829 ;
830 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
831 /* Recognize constant loads; even with small stacks these are necessary
832 on Thumb. */
833 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
834 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
835 {
836 /* Constant pool loads, for the same reason. */
837 unsigned int constant;
838 CORE_ADDR loc;
839
840 loc = start + 4 + bits (insn, 0, 7) * 4;
841 constant = read_memory_unsigned_integer (loc, 4, byte_order);
842 regs[bits (insn, 8, 10)] = pv_constant (constant);
843 }
844 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
845 {
846 unsigned short inst2;
847
848 inst2 = read_memory_unsigned_integer (start + 2, 2,
849 byte_order_for_code);
850
851 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
852 {
853 /* BL, BLX. Allow some special function calls when
854 skipping the prologue; GCC generates these before
855 storing arguments to the stack. */
856 CORE_ADDR nextpc;
857 int j1, j2, imm1, imm2;
858
859 imm1 = sbits (insn, 0, 10);
860 imm2 = bits (inst2, 0, 10);
861 j1 = bit (inst2, 13);
862 j2 = bit (inst2, 11);
863
864 offset = ((imm1 << 12) + (imm2 << 1));
865 offset ^= ((!j2) << 22) | ((!j1) << 23);
866
867 nextpc = start + 4 + offset;
868 /* For BLX make sure to clear the low bits. */
869 if (bit (inst2, 12) == 0)
870 nextpc = nextpc & 0xfffffffc;
871
872 if (!skip_prologue_function (gdbarch, nextpc,
873 bit (inst2, 12) != 0))
874 break;
875 }
876
877 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
878 { registers } */
879 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
880 {
881 pv_t addr = regs[bits (insn, 0, 3)];
882 int regno;
883
884 if (pv_area_store_would_trash (stack, addr))
885 break;
886
887 /* Calculate offsets of saved registers. */
888 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
889 if (inst2 & (1 << regno))
890 {
891 addr = pv_add_constant (addr, -4);
892 pv_area_store (stack, addr, 4, regs[regno]);
893 }
894
895 if (insn & 0x0020)
896 regs[bits (insn, 0, 3)] = addr;
897 }
898
899 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
900 [Rn, #+/-imm]{!} */
901 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
902 {
903 int regno1 = bits (inst2, 12, 15);
904 int regno2 = bits (inst2, 8, 11);
905 pv_t addr = regs[bits (insn, 0, 3)];
906
907 offset = inst2 & 0xff;
908 if (insn & 0x0080)
909 addr = pv_add_constant (addr, offset);
910 else
911 addr = pv_add_constant (addr, -offset);
912
913 if (pv_area_store_would_trash (stack, addr))
914 break;
915
916 pv_area_store (stack, addr, 4, regs[regno1]);
917 pv_area_store (stack, pv_add_constant (addr, 4),
918 4, regs[regno2]);
919
920 if (insn & 0x0020)
921 regs[bits (insn, 0, 3)] = addr;
922 }
923
924 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
925 && (inst2 & 0x0c00) == 0x0c00
926 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
927 {
928 int regno = bits (inst2, 12, 15);
929 pv_t addr = regs[bits (insn, 0, 3)];
930
931 offset = inst2 & 0xff;
932 if (inst2 & 0x0200)
933 addr = pv_add_constant (addr, offset);
934 else
935 addr = pv_add_constant (addr, -offset);
936
937 if (pv_area_store_would_trash (stack, addr))
938 break;
939
940 pv_area_store (stack, addr, 4, regs[regno]);
941
942 if (inst2 & 0x0100)
943 regs[bits (insn, 0, 3)] = addr;
944 }
945
946 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
947 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
948 {
949 int regno = bits (inst2, 12, 15);
950 pv_t addr;
951
952 offset = inst2 & 0xfff;
953 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
954
955 if (pv_area_store_would_trash (stack, addr))
956 break;
957
958 pv_area_store (stack, addr, 4, regs[regno]);
959 }
960
961 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
962 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
963 /* Ignore stores of argument registers to the stack. */
964 ;
965
966 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
967 && (inst2 & 0x0d00) == 0x0c00
968 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
969 /* Ignore stores of argument registers to the stack. */
970 ;
971
972 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
973 { registers } */
974 && (inst2 & 0x8000) == 0x0000
975 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
976 /* Ignore block loads from the stack, potentially copying
977 parameters from memory. */
978 ;
979
980 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
981 [Rn, #+/-imm] */
982 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
983 /* Similarly ignore dual loads from the stack. */
984 ;
985
986 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
987 && (inst2 & 0x0d00) == 0x0c00
988 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
989 /* Similarly ignore single loads from the stack. */
990 ;
991
992 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
993 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
994 /* Similarly ignore single loads from the stack. */
995 ;
996
997 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
998 && (inst2 & 0x8000) == 0x0000)
999 {
1000 unsigned int imm = ((bits (insn, 10, 10) << 11)
1001 | (bits (inst2, 12, 14) << 8)
1002 | bits (inst2, 0, 7));
1003
1004 regs[bits (inst2, 8, 11)]
1005 = pv_add_constant (regs[bits (insn, 0, 3)],
1006 thumb_expand_immediate (imm));
1007 }
1008
1009 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1010 && (inst2 & 0x8000) == 0x0000)
1011 {
1012 unsigned int imm = ((bits (insn, 10, 10) << 11)
1013 | (bits (inst2, 12, 14) << 8)
1014 | bits (inst2, 0, 7));
1015
1016 regs[bits (inst2, 8, 11)]
1017 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1018 }
1019
1020 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1021 && (inst2 & 0x8000) == 0x0000)
1022 {
1023 unsigned int imm = ((bits (insn, 10, 10) << 11)
1024 | (bits (inst2, 12, 14) << 8)
1025 | bits (inst2, 0, 7));
1026
1027 regs[bits (inst2, 8, 11)]
1028 = pv_add_constant (regs[bits (insn, 0, 3)],
1029 - (CORE_ADDR) thumb_expand_immediate (imm));
1030 }
1031
1032 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1033 && (inst2 & 0x8000) == 0x0000)
1034 {
1035 unsigned int imm = ((bits (insn, 10, 10) << 11)
1036 | (bits (inst2, 12, 14) << 8)
1037 | bits (inst2, 0, 7));
1038
1039 regs[bits (inst2, 8, 11)]
1040 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1041 }
1042
1043 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1044 {
1045 unsigned int imm = ((bits (insn, 10, 10) << 11)
1046 | (bits (inst2, 12, 14) << 8)
1047 | bits (inst2, 0, 7));
1048
1049 regs[bits (inst2, 8, 11)]
1050 = pv_constant (thumb_expand_immediate (imm));
1051 }
1052
1053 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1054 {
1055 unsigned int imm
1056 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1057
1058 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1059 }
1060
1061 else if (insn == 0xea5f /* mov.w Rd,Rm */
1062 && (inst2 & 0xf0f0) == 0)
1063 {
1064 int dst_reg = (inst2 & 0x0f00) >> 8;
1065 int src_reg = inst2 & 0xf;
1066 regs[dst_reg] = regs[src_reg];
1067 }
1068
1069 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1070 {
1071 /* Constant pool loads. */
1072 unsigned int constant;
1073 CORE_ADDR loc;
1074
1075 offset = bits (insn, 0, 11);
1076 if (insn & 0x0080)
1077 loc = start + 4 + offset;
1078 else
1079 loc = start + 4 - offset;
1080
1081 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1082 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1083 }
1084
1085 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1086 {
1087 /* Constant pool loads. */
1088 unsigned int constant;
1089 CORE_ADDR loc;
1090
1091 offset = bits (insn, 0, 7) << 2;
1092 if (insn & 0x0080)
1093 loc = start + 4 + offset;
1094 else
1095 loc = start + 4 - offset;
1096
1097 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1098 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1099
1100 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1101 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1102 }
1103
1104 else if (thumb2_instruction_changes_pc (insn, inst2))
1105 {
1106 /* Don't scan past anything that might change control flow. */
1107 break;
1108 }
1109 else
1110 {
1111 /* The optimizer might shove anything into the prologue,
1112 so we just skip what we don't recognize. */
1113 unrecognized_pc = start;
1114 }
1115
1116 start += 2;
1117 }
1118 else if (thumb_instruction_changes_pc (insn))
1119 {
1120 /* Don't scan past anything that might change control flow. */
1121 break;
1122 }
1123 else
1124 {
1125 /* The optimizer might shove anything into the prologue,
1126 so we just skip what we don't recognize. */
1127 unrecognized_pc = start;
1128 }
1129
1130 start += 2;
1131 }
1132
1133 if (arm_debug)
1134 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1135 paddress (gdbarch, start));
1136
1137 if (unrecognized_pc == 0)
1138 unrecognized_pc = start;
1139
1140 if (cache == NULL)
1141 {
1142 do_cleanups (back_to);
1143 return unrecognized_pc;
1144 }
1145
1146 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1147 {
1148 /* Frame pointer is fp. Frame size is constant. */
1149 cache->framereg = ARM_FP_REGNUM;
1150 cache->framesize = -regs[ARM_FP_REGNUM].k;
1151 }
1152 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1153 {
1154 /* Frame pointer is r7. Frame size is constant. */
1155 cache->framereg = THUMB_FP_REGNUM;
1156 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1157 }
1158 else
1159 {
1160 /* Try the stack pointer... this is a bit desperate. */
1161 cache->framereg = ARM_SP_REGNUM;
1162 cache->framesize = -regs[ARM_SP_REGNUM].k;
1163 }
1164
1165 for (i = 0; i < 16; i++)
1166 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1167 cache->saved_regs[i].addr = offset;
1168
1169 do_cleanups (back_to);
1170 return unrecognized_pc;
1171 }
1172
1173
1174 /* Try to analyze the instructions starting from PC, which load symbol
1175 __stack_chk_guard. Return the address of instruction after loading this
1176 symbol, set the dest register number to *BASEREG, and set the size of
1177 instructions for loading symbol in OFFSET. Return 0 if instructions are
1178 not recognized. */
1179
static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  /* ADDRESS stays 0 (the "not recognized" return value) unless one of
     the known loading sequences below is matched.  */
  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_memory_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800)	/* ldr Rd, #immed */
	{
	  /* Encoding T1 ldr-literal: Rt in bits 8-10, imm8 in bits 0-7.  */
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  address = bits (insn1, 0, 7);
	  /* NOTE(review): in this branch ADDRESS is just the raw 8-bit
	     literal-pool offset of the ldr, not an absolute address as
	     in the movw/movt branch below -- confirm the caller's
	     lookup_minimal_symbol_by_pc copes with that.  */
	}
      else if ((insn1 & 0xfbf0) == 0xf240)	/* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  /* Lower 16 bits of the symbol address.  */
	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* Read the next 32-bit Thumb instruction, expected to be the
	     matching movt.  */
	  insn1
	    = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      /* Upper 16 bits; only now is the sequence recognized and
		 the full address assembled.  */
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_memory_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000)	/* ldr Rd, #immed */
	{
	  /* PC-relative ldr: imm12 in bits 0-11, Rt in bits 12-15.  */
	  address = bits (insn, 0, 11);
	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	  /* NOTE(review): as in the Thumb ldr case, ADDRESS here is only
	     the 12-bit load offset, not a resolved address -- verify.  */
	}
      else if ((insn & 0x0ff00000) == 0x03000000)	/* movw Rd, #const */
	{
	  /* Lower 16 bits of the symbol address.  */
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  insn
	    = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000)	/* movt Rd, #const */
	    {
	      /* Upper 16 bits complete the address.  */
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}
1252
1253 /* Try to skip a sequence of instructions used for stack protector. If PC
1254 points to the first instruction of this sequence, return the address of
1255 first instruction after this sequence, otherwise, return original PC.
1256
1257 On arm, this sequence of instructions is composed of mainly three steps,
1258 Step 1: load symbol __stack_chk_guard,
1259 Step 2: load from address of __stack_chk_guard,
1260 Step 3: store it to somewhere else.
1261
1262 Usually, instructions on step 2 and step 3 are the same on various ARM
1263 architectures. On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
1264 on step 3, it is also one instruction 'str Rx, [r7, #immd]'. However,
1265 instructions in step 1 vary from different ARM architectures. On ARMv7,
1266 they are,
1267
1268 movw Rn, #:lower16:__stack_chk_guard
1269 movt Rn, #:upper16:__stack_chk_guard
1270
1271 On ARMv5t, it is,
1272
1273 ldr Rn, .Label
1274 ....
   .Label:
1276 .word __stack_chk_guard
1277
1278 Since ldr/str is a very popular instruction, we can't use them as
1279 'fingerprint' or 'signature' of stack protector sequence. Here we choose
1280 sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */
1282
static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int basereg;
  struct minimal_symbol *stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  On success ADDR is the
     value loaded, BASEREG the register it was loaded into, and OFFSET
     the byte size of the loading sequence.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* If name of symbol doesn't start with '__stack_chk_guard', this
     instruction sequence is not for stack protector.  If symbol is
     removed, we conservatively think this sequence is for stack protector.  */
  if (stack_chk_guard
      && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
		  strlen ("__stack_chk_guard")) != 0)
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register that holds the guard
	 address (Rn in bits 3-5 for encoding T1).  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_memory_unsigned_integer (pc + offset + 2, 2,
					   byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* The store must write the value just loaded in step 2.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      /* Base register (bits 16-19) must be the guard-address register.  */
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_memory_unsigned_integer (pc + offset + 4,
					   4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      /* The store must write the value just loaded in step 2.  */
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.  */
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}
1356
1357 /* Advance the PC across any function entry prologue instructions to
1358 reach some "real" code.
1359
1360 The APCS (ARM Procedure Call Standard) defines the following
1361 prologue:
1362
1363 mov ip, sp
1364 [stmfd sp!, {a1,a2,a3,a4}]
1365 stmfd sp!, {...,fp,ip,lr,pc}
1366 [stfe f7, [sp, #-12]!]
1367 [stfe f6, [sp, #-12]!]
1368 [stfe f5, [sp, #-12]!]
1369 [stfe f4, [sp, #-12]!]
1370 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1371
static CORE_ADDR
arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol table.
     If so, then return either PC, or the PC after the prologue, whichever
     is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct symtab *s = find_pc_symtab (func_addr);

      /* Also skip past any stack-protector guard-load sequence that
	 follows the line-table prologue.  */
      if (post_prologue_pc)
	post_prologue_pc
	  = arm_skip_stack_protector (post_prologue_pc, gdbarch);


      /* GCC always emits a line note before the prologue and another
	 one after, even if the two are at the same address or on the
	 same line.  Take advantage of this so that we do not need to
	 know every instruction that might appear in the prologue.  We
	 will have producer information for most binaries; if it is
	 missing (e.g. for -gstabs), assume the GNU tools.  Clang's
	 line information is trusted the same way, hence the second
	 producer check.  */
      if (post_prologue_pc
	  && (s == NULL
	      || s->producer == NULL
	      || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0
	      || strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
	return post_prologue_pc;

      if (post_prologue_pc != 0)
	{
	  CORE_ADDR analyzed_limit;

	  /* For non-GCC compilers, make sure the entire line is an
	     acceptable prologue; GDB will round this function's
	     return value up to the end of the following line so we
	     can not skip just part of a line (and we do not want to).

	     RealView does not treat the prologue specially, but does
	     associate prologue code with the opening brace; so this
	     lets us skip the first line if we think it is the opening
	     brace.  */
	  if (arm_pc_is_thumb (gdbarch, func_addr))
	    analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
						     post_prologue_pc, NULL);
	  else
	    analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
						   post_prologue_pc, NULL);

	  /* If the analyzer stopped early, the line table's answer is
	     not trustworthy; stay at the function entry.  */
	  if (analyzed_limit != post_prologue_pc)
	    return func_addr;

	  return post_prologue_pc;
	}
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to provide
     that bound, then use an arbitrary large number as the upper bound.  */
  /* Like arm_scan_prologue, stop no later than pc + 64.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 64;          /* Magic.  */


  /* Check if this is Thumb code.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);

  /* ARM mode: walk forward over instructions known to occur in APCS
     prologues, stopping at the first one we do not recognize.  */
  for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
    {
      inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);

      /* "mov ip, sp" is no longer a required part of the prologue.  */
      if (inst == 0xe1a0c00d)			/* mov ip, sp */
	continue;

      if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
	continue;

      if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
	continue;

      /* Some prologues begin with "str lr, [sp, #-4]!".  */
      if (inst == 0xe52de004)			/* str lr, [sp, #-4]! */
	continue;

      if ((inst & 0xfffffff0) == 0xe92d0000)	/* stmfd sp!,{a1,a2,a3,a4} */
	continue;

      if ((inst & 0xfffff800) == 0xe92dd800)	/* stmfd sp!,{fp,ip,lr,pc} */
	continue;

      /* Any insns after this point may float into the code, if it makes
	 for better instruction scheduling, so we skip them only if we
	 find them, but still consider the function to be frame-ful.  */

      /* We may have either one sfmfd instruction here, or several stfe
	 insns, depending on the version of floating point code we
	 support.  */
      if ((inst & 0xffbf0fff) == 0xec2d0200)	/* sfmfd fn, <cnt>, [sp]! */
	continue;

      if ((inst & 0xffff8fff) == 0xed6d0103)	/* stfe fn, [sp, #-12]! */
	continue;

      if ((inst & 0xfffff000) == 0xe24cb000)	/* sub fp, ip, #nn */
	continue;

      if ((inst & 0xfffff000) == 0xe24dd000)	/* sub sp, sp, #nn */
	continue;

      if ((inst & 0xffffc000) == 0xe54b0000	/* strb r(0123),[r11,#-nn] */
	  || (inst & 0xffffc0f0) == 0xe14b00b0	/* strh r(0123),[r11,#-nn] */
	  || (inst & 0xffffc000) == 0xe50b0000)	/* str r(0123),[r11,#-nn] */
	continue;

      if ((inst & 0xffffc000) == 0xe5cd0000	/* strb r(0123),[sp,#nn] */
	  || (inst & 0xffffc0f0) == 0xe1cd00b0	/* strh r(0123),[sp,#nn] */
	  || (inst & 0xffffc000) == 0xe58d0000)	/* str r(0123),[sp,#nn] */
	continue;

      /* Un-recognized instruction; stop scanning.  */
      break;
    }

  return skip_pc;		/* End of prologue.  */
}
1509
1510 /* *INDENT-OFF* */
1511 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1512 This function decodes a Thumb function prologue to determine:
1513 1) the size of the stack frame
1514 2) which registers are saved on it
1515 3) the offsets of saved regs
1516 4) the offset from the stack pointer to the frame pointer
1517
1518 A typical Thumb function prologue would create this stack frame
1519 (offsets relative to FP)
1520 old SP -> 24 stack parameters
1521 20 LR
1522 16 R7
1523 R7 -> 0 local variables (16 bytes)
1524 SP -> -12 additional stack space (12 bytes)
1525 The frame size would thus be 36 bytes, and the frame offset would be
1526 12 bytes. The frame register is R7.
1527
1528 The comments for thumb_skip_prolog() describe the algorithm we use
1529 to detect the end of the prolog. */
1530 /* *INDENT-ON* */
1531
1532 static void
1533 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1534 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1535 {
1536 CORE_ADDR prologue_start;
1537 CORE_ADDR prologue_end;
1538
1539 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1540 &prologue_end))
1541 {
1542 /* See comment in arm_scan_prologue for an explanation of
1543 this heuristics. */
1544 if (prologue_end > prologue_start + 64)
1545 {
1546 prologue_end = prologue_start + 64;
1547 }
1548 }
1549 else
1550 /* We're in the boondocks: we have no idea where the start of the
1551 function is. */
1552 return;
1553
1554 prologue_end = min (prologue_end, prev_pc);
1555
1556 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1557 }
1558
1559 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1560
1561 static int
1562 arm_instruction_changes_pc (uint32_t this_instr)
1563 {
1564 if (bits (this_instr, 28, 31) == INST_NV)
1565 /* Unconditional instructions. */
1566 switch (bits (this_instr, 24, 27))
1567 {
1568 case 0xa:
1569 case 0xb:
1570 /* Branch with Link and change to Thumb. */
1571 return 1;
1572 case 0xc:
1573 case 0xd:
1574 case 0xe:
1575 /* Coprocessor register transfer. */
1576 if (bits (this_instr, 12, 15) == 15)
1577 error (_("Invalid update to pc in instruction"));
1578 return 0;
1579 default:
1580 return 0;
1581 }
1582 else
1583 switch (bits (this_instr, 25, 27))
1584 {
1585 case 0x0:
1586 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1587 {
1588 /* Multiplies and extra load/stores. */
1589 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
1590 /* Neither multiplies nor extension load/stores are allowed
1591 to modify PC. */
1592 return 0;
1593
1594 /* Otherwise, miscellaneous instructions. */
1595
1596 /* BX <reg>, BXJ <reg>, BLX <reg> */
1597 if (bits (this_instr, 4, 27) == 0x12fff1
1598 || bits (this_instr, 4, 27) == 0x12fff2
1599 || bits (this_instr, 4, 27) == 0x12fff3)
1600 return 1;
1601
1602 /* Other miscellaneous instructions are unpredictable if they
1603 modify PC. */
1604 return 0;
1605 }
1606 /* Data processing instruction. Fall through. */
1607
1608 case 0x1:
1609 if (bits (this_instr, 12, 15) == 15)
1610 return 1;
1611 else
1612 return 0;
1613
1614 case 0x2:
1615 case 0x3:
1616 /* Media instructions and architecturally undefined instructions. */
1617 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1618 return 0;
1619
1620 /* Stores. */
1621 if (bit (this_instr, 20) == 0)
1622 return 0;
1623
1624 /* Loads. */
1625 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1626 return 1;
1627 else
1628 return 0;
1629
1630 case 0x4:
1631 /* Load/store multiple. */
1632 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1633 return 1;
1634 else
1635 return 0;
1636
1637 case 0x5:
1638 /* Branch and branch with link. */
1639 return 1;
1640
1641 case 0x6:
1642 case 0x7:
1643 /* Coprocessor transfers or SWIs can not affect PC. */
1644 return 0;
1645
1646 default:
1647 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1648 }
1649 }
1650
1651 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1652 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1653 fill it in. Return the first address not recognized as a prologue
1654 instruction.
1655
1656 We recognize all the instructions typically found in ARM prologues,
1657 plus harmless instructions which can be skipped (either for analysis
1658 purposes, or a more restrictive set that can be skipped when finding
1659 the end of the prologue). */
1660
1661 static CORE_ADDR
1662 arm_analyze_prologue (struct gdbarch *gdbarch,
1663 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1664 struct arm_prologue_cache *cache)
1665 {
1666 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1667 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1668 int regno;
1669 CORE_ADDR offset, current_pc;
1670 pv_t regs[ARM_FPS_REGNUM];
1671 struct pv_area *stack;
1672 struct cleanup *back_to;
1673 int framereg, framesize;
1674 CORE_ADDR unrecognized_pc = 0;
1675
1676 /* Search the prologue looking for instructions that set up the
1677 frame pointer, adjust the stack pointer, and save registers.
1678
1679 Be careful, however, and if it doesn't look like a prologue,
1680 don't try to scan it. If, for instance, a frameless function
1681 begins with stmfd sp!, then we will tell ourselves there is
1682 a frame, which will confuse stack traceback, as well as "finish"
1683 and other operations that rely on a knowledge of the stack
1684 traceback. */
1685
1686 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1687 regs[regno] = pv_register (regno, 0);
1688 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1689 back_to = make_cleanup_free_pv_area (stack);
1690
1691 for (current_pc = prologue_start;
1692 current_pc < prologue_end;
1693 current_pc += 4)
1694 {
1695 unsigned int insn
1696 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1697
1698 if (insn == 0xe1a0c00d) /* mov ip, sp */
1699 {
1700 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1701 continue;
1702 }
1703 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1704 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1705 {
1706 unsigned imm = insn & 0xff; /* immediate value */
1707 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1708 int rd = bits (insn, 12, 15);
1709 imm = (imm >> rot) | (imm << (32 - rot));
1710 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1711 continue;
1712 }
1713 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1715 {
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1721 continue;
1722 }
1723 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1724 [sp, #-4]! */
1725 {
1726 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1727 break;
1728 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1729 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1730 regs[bits (insn, 12, 15)]);
1731 continue;
1732 }
1733 else if ((insn & 0xffff0000) == 0xe92d0000)
1734 /* stmfd sp!, {..., fp, ip, lr, pc}
1735 or
1736 stmfd sp!, {a1, a2, a3, a4} */
1737 {
1738 int mask = insn & 0xffff;
1739
1740 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1741 break;
1742
1743 /* Calculate offsets of saved registers. */
1744 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1745 if (mask & (1 << regno))
1746 {
1747 regs[ARM_SP_REGNUM]
1748 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1749 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1750 }
1751 }
1752 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1753 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1754 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1755 {
1756 /* No need to add this to saved_regs -- it's just an arg reg. */
1757 continue;
1758 }
1759 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1760 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1761 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1762 {
1763 /* No need to add this to saved_regs -- it's just an arg reg. */
1764 continue;
1765 }
1766 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1767 { registers } */
1768 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1769 {
1770 /* No need to add this to saved_regs -- it's just arg regs. */
1771 continue;
1772 }
1773 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1774 {
1775 unsigned imm = insn & 0xff; /* immediate value */
1776 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1777 imm = (imm >> rot) | (imm << (32 - rot));
1778 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1779 }
1780 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1781 {
1782 unsigned imm = insn & 0xff; /* immediate value */
1783 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1784 imm = (imm >> rot) | (imm << (32 - rot));
1785 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1786 }
1787 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1788 [sp, -#c]! */
1789 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1790 {
1791 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1792 break;
1793
1794 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1795 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1796 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1797 }
1798 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1799 [sp!] */
1800 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1801 {
1802 int n_saved_fp_regs;
1803 unsigned int fp_start_reg, fp_bound_reg;
1804
1805 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1806 break;
1807
1808 if ((insn & 0x800) == 0x800) /* N0 is set */
1809 {
1810 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1811 n_saved_fp_regs = 3;
1812 else
1813 n_saved_fp_regs = 1;
1814 }
1815 else
1816 {
1817 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1818 n_saved_fp_regs = 2;
1819 else
1820 n_saved_fp_regs = 4;
1821 }
1822
1823 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1824 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1825 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1826 {
1827 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1828 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1829 regs[fp_start_reg++]);
1830 }
1831 }
1832 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1833 {
1834 /* Allow some special function calls when skipping the
1835 prologue; GCC generates these before storing arguments to
1836 the stack. */
1837 CORE_ADDR dest = BranchDest (current_pc, insn);
1838
1839 if (skip_prologue_function (gdbarch, dest, 0))
1840 continue;
1841 else
1842 break;
1843 }
1844 else if ((insn & 0xf0000000) != 0xe0000000)
1845 break; /* Condition not true, exit early. */
1846 else if (arm_instruction_changes_pc (insn))
1847 /* Don't scan past anything that might change control flow. */
1848 break;
1849 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1850 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1851 /* Ignore block loads from the stack, potentially copying
1852 parameters from memory. */
1853 continue;
1854 else if ((insn & 0xfc500000) == 0xe4100000
1855 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1856 /* Similarly ignore single loads from the stack. */
1857 continue;
1858 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1859 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1860 register instead of the stack. */
1861 continue;
1862 else
1863 {
1864 /* The optimizer might shove anything into the prologue,
1865 so we just skip what we don't recognize. */
1866 unrecognized_pc = current_pc;
1867 continue;
1868 }
1869 }
1870
1871 if (unrecognized_pc == 0)
1872 unrecognized_pc = current_pc;
1873
1874 /* The frame size is just the distance from the frame register
1875 to the original stack pointer. */
1876 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1877 {
1878 /* Frame pointer is fp. */
1879 framereg = ARM_FP_REGNUM;
1880 framesize = -regs[ARM_FP_REGNUM].k;
1881 }
1882 else
1883 {
1884 /* Try the stack pointer... this is a bit desperate. */
1885 framereg = ARM_SP_REGNUM;
1886 framesize = -regs[ARM_SP_REGNUM].k;
1887 }
1888
1889 if (cache)
1890 {
1891 cache->framereg = framereg;
1892 cache->framesize = framesize;
1893
1894 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1895 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1896 cache->saved_regs[regno].addr = offset;
1897 }
1898
1899 if (arm_debug)
1900 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1901 paddress (gdbarch, unrecognized_pc));
1902
1903 do_cleanups (back_to);
1904 return unrecognized_pc;
1905 }
1906
1907 static void
1908 arm_scan_prologue (struct frame_info *this_frame,
1909 struct arm_prologue_cache *cache)
1910 {
1911 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1913 int regno;
1914 CORE_ADDR prologue_start, prologue_end, current_pc;
1915 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1916 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1917 pv_t regs[ARM_FPS_REGNUM];
1918 struct pv_area *stack;
1919 struct cleanup *back_to;
1920 CORE_ADDR offset;
1921
1922 /* Assume there is no frame until proven otherwise. */
1923 cache->framereg = ARM_SP_REGNUM;
1924 cache->framesize = 0;
1925
1926 /* Check for Thumb prologue. */
1927 if (arm_frame_is_thumb (this_frame))
1928 {
1929 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1930 return;
1931 }
1932
1933 /* Find the function prologue. If we can't find the function in
1934 the symbol table, peek in the stack frame to find the PC. */
1935 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1936 &prologue_end))
1937 {
1938 /* One way to find the end of the prologue (which works well
1939 for unoptimized code) is to do the following:
1940
1941 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1942
1943 if (sal.line == 0)
1944 prologue_end = prev_pc;
1945 else if (sal.end < prologue_end)
1946 prologue_end = sal.end;
1947
1948 This mechanism is very accurate so long as the optimizer
1949 doesn't move any instructions from the function body into the
1950 prologue. If this happens, sal.end will be the last
1951 instruction in the first hunk of prologue code just before
1952 the first instruction that the scheduler has moved from
1953 the body to the prologue.
1954
1955 In order to make sure that we scan all of the prologue
1956 instructions, we use a slightly less accurate mechanism which
1957 may scan more than necessary. To help compensate for this
1958 lack of accuracy, the prologue scanning loop below contains
1959 several clauses which'll cause the loop to terminate early if
1960 an implausible prologue instruction is encountered.
1961
1962 The expression
1963
1964 prologue_start + 64
1965
1966 is a suitable endpoint since it accounts for the largest
1967 possible prologue plus up to five instructions inserted by
1968 the scheduler. */
1969
1970 if (prologue_end > prologue_start + 64)
1971 {
1972 prologue_end = prologue_start + 64; /* See above. */
1973 }
1974 }
1975 else
1976 {
1977 /* We have no symbol information. Our only option is to assume this
1978 function has a standard stack frame and the normal frame register.
1979 Then, we can find the value of our frame pointer on entrance to
1980 the callee (or at the present moment if this is the innermost frame).
1981 The value stored there should be the address of the stmfd + 8. */
1982 CORE_ADDR frame_loc;
1983 LONGEST return_value;
1984
1985 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1986 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1987 return;
1988 else
1989 {
1990 prologue_start = gdbarch_addr_bits_remove
1991 (gdbarch, return_value) - 8;
1992 prologue_end = prologue_start + 64; /* See above. */
1993 }
1994 }
1995
1996 if (prev_pc < prologue_end)
1997 prologue_end = prev_pc;
1998
1999 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2000 }
2001
2002 static struct arm_prologue_cache *
2003 arm_make_prologue_cache (struct frame_info *this_frame)
2004 {
2005 int reg;
2006 struct arm_prologue_cache *cache;
2007 CORE_ADDR unwound_fp;
2008
2009 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2010 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2011
2012 arm_scan_prologue (this_frame, cache);
2013
2014 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2015 if (unwound_fp == 0)
2016 return cache;
2017
2018 cache->prev_sp = unwound_fp + cache->framesize;
2019
2020 /* Calculate actual addresses of saved registers using offsets
2021 determined by arm_scan_prologue. */
2022 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2023 if (trad_frame_addr_p (cache->saved_regs, reg))
2024 cache->saved_regs[reg].addr += cache->prev_sp;
2025
2026 return cache;
2027 }
2028
2029 /* Our frame ID for a normal frame is the current function's starting PC
2030 and the caller's SP when we were called. */
2031
2032 static void
2033 arm_prologue_this_id (struct frame_info *this_frame,
2034 void **this_cache,
2035 struct frame_id *this_id)
2036 {
2037 struct arm_prologue_cache *cache;
2038 struct frame_id id;
2039 CORE_ADDR pc, func;
2040
2041 if (*this_cache == NULL)
2042 *this_cache = arm_make_prologue_cache (this_frame);
2043 cache = *this_cache;
2044
2045 /* This is meant to halt the backtrace at "_start". */
2046 pc = get_frame_pc (this_frame);
2047 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2048 return;
2049
2050 /* If we've hit a wall, stop. */
2051 if (cache->prev_sp == 0)
2052 return;
2053
2054 /* Use function start address as part of the frame ID. If we cannot
2055 identify the start address (due to missing symbol information),
2056 fall back to just using the current PC. */
2057 func = get_frame_func (this_frame);
2058 if (!func)
2059 func = pc;
2060
2061 id = frame_id_build (cache->prev_sp, func);
2062 *this_id = id;
2063 }
2064
2065 static struct value *
2066 arm_prologue_prev_register (struct frame_info *this_frame,
2067 void **this_cache,
2068 int prev_regnum)
2069 {
2070 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2071 struct arm_prologue_cache *cache;
2072
2073 if (*this_cache == NULL)
2074 *this_cache = arm_make_prologue_cache (this_frame);
2075 cache = *this_cache;
2076
2077 /* If we are asked to unwind the PC, then we need to return the LR
2078 instead. The prologue may save PC, but it will point into this
2079 frame's prologue, not the next frame's resume location. Also
2080 strip the saved T bit. A valid LR may have the low bit set, but
2081 a valid PC never does. */
2082 if (prev_regnum == ARM_PC_REGNUM)
2083 {
2084 CORE_ADDR lr;
2085
2086 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2087 return frame_unwind_got_constant (this_frame, prev_regnum,
2088 arm_addr_bits_remove (gdbarch, lr));
2089 }
2090
2091 /* SP is generally not saved to the stack, but this frame is
2092 identified by the next frame's stack pointer at the time of the call.
2093 The value was already reconstructed into PREV_SP. */
2094 if (prev_regnum == ARM_SP_REGNUM)
2095 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2096
2097 /* The CPSR may have been changed by the call instruction and by the
2098 called function. The only bit we can reconstruct is the T bit,
2099 by checking the low bit of LR as of the call. This is a reliable
2100 indicator of Thumb-ness except for some ARM v4T pre-interworking
2101 Thumb code, which could get away with a clear low bit as long as
2102 the called function did not use bx. Guess that all other
2103 bits are unchanged; the condition flags are presumably lost,
2104 but the processor status is likely valid. */
2105 if (prev_regnum == ARM_PS_REGNUM)
2106 {
2107 CORE_ADDR lr, cpsr;
2108 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2109
2110 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2111 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2112 if (IS_THUMB_ADDR (lr))
2113 cpsr |= t_bit;
2114 else
2115 cpsr &= ~t_bit;
2116 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2117 }
2118
2119 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2120 prev_regnum);
2121 }
2122
/* Unwinder based on prologue analysis; used for normal frames when no
   DWARF CFI and no usable ARM exception table entry is available.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
2131
2132 /* Maintain a list of ARM exception table entries per objfile, similar to the
2133 list of mapping symbols. We only cache entries for standard ARM-defined
2134 personality routines; the cache will contain only the frame unwinding
2135 instructions associated with the entry (not the descriptors). */
2136
/* Per-objfile key under which the exception table cache is stored.  */
static const struct objfile_data *arm_exidx_data_key;

/* One cached exception table entry.  */
struct arm_exidx_entry
{
  bfd_vma addr;		/* Function start, as an offset into its section.  */
  gdb_byte *entry;	/* Normalized unwind instructions, or NULL when the
			   index entry carries none (e.g. CANTUNWIND).  */
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* The cache proper: one vector of entries per BFD section of the
   objfile, indexed by section index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};
2151
2152 static void
2153 arm_exidx_data_free (struct objfile *objfile, void *arg)
2154 {
2155 struct arm_exidx_data *data = arg;
2156 unsigned int i;
2157
2158 for (i = 0; i < objfile->obfd->section_count; i++)
2159 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2160 }
2161
2162 static inline int
2163 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2164 const struct arm_exidx_entry *rhs)
2165 {
2166 return lhs->addr < rhs->addr;
2167 }
2168
2169 static struct obj_section *
2170 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2171 {
2172 struct obj_section *osect;
2173
2174 ALL_OBJFILE_OSECTIONS (objfile, osect)
2175 if (bfd_get_section_flags (objfile->obfd,
2176 osect->the_bfd_section) & SEC_ALLOC)
2177 {
2178 bfd_vma start, size;
2179 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2180 size = bfd_get_section_size (osect->the_bfd_section);
2181
2182 if (start <= vma && vma < start + size)
2183 return osect;
2184 }
2185
2186 return NULL;
2187 }
2188
2189 /* Parse contents of exception table and exception index sections
2190 of OBJFILE, and fill in the exception table entry cache.
2191
2192 For each entry that refers to a standard ARM-defined personality
2193 routine, extract the frame unwinding instructions (from either
2194 the index or the table section). The unwinding instructions
2195 are normalized by:
2196 - extracting them from the rest of the table data
2197 - converting to host endianness
2198 - appending the implicit 0xb0 ("Finish") code
2199
2200 The extracted and normalized instructions are stored for later
2201 retrieval by the arm_find_exidx_entry routine. */
2202
static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups;
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing. */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;
  cleanups = make_cleanup (null_cleanup, NULL);

  /* Read contents of exception table and index. */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure. */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a place-relative function address and either an inline
     unwind descriptor or a place-relative pointer into .ARM.extab. */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function.  The low 31 bits hold a
	 place-relative offset; the XOR/subtract pair sign-extends it
	 from bit 30. */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset. */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry. */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present. */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form. */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Sign-extend the
	     31-bit place-relative offset as above. */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form. */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  Bits 16..23 hold the count of extra
		     instruction words that follow. */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  The first word is a
		     place-relative pointer to it. */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines. */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms. */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address: drop the entry if the extra words would
	 run off the end of .ARM.extab. */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR. */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  /* Copy instruction bytes most-significant first, converting
	     from target words to a flat host byte sequence. */
	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list. */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses. */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}
2411
2412 /* Search for the exception table entry covering MEMADDR. If one is found,
2413 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2414 set *START to the start of the region covered by this entry. */
2415
static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      /* Cache entries store section-relative addresses, so convert
	 MEMADDR before searching. */
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      /* Binary search; entries were pushed in ascending
		 address order by arm_exidx_new_objfile. */
	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address. */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  /* No section, no cache, or MEMADDR precedes the first entry. */
  return NULL;
}
2468
2469 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2470 instruction list from the ARM exception table entry ENTRY, allocate and
2471 return a prologue cache structure describing how to unwind this frame.
2472
2473 Return NULL if the unwinding instruction list contains a "spare",
2474 "reserved" or "refuse to unwind" instruction as defined in section
2475 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2476 for the ARM Architecture" document. */
2477
static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  /* VSP is the "virtual stack pointer" of the EHABI unwinding model;
     it starts at this frame's SP and ends up as the caller's SP. */
  CORE_ADDR vsp = 0;
  int vsp_valid = 0;

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame. */
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction. */
      insn = *entry++;

      /* 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4. */
      if ((insn & 0xc0) == 0)
	{
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      /* 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4. */
      else if ((insn & 0xc0) == 0x40)
	{
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      /* 1000iiii iiiiiiii: pop r4-r15 under 12-bit mask. */
      else if ((insn & 0xf0) == 0x80)
	{
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer. */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask. */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp. */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      /* 1001nnnn: vsp = r[nnnn] (nnnn != 13, 15). */
      else if ((insn & 0xf0) == 0x90)
	{
	  int reg = insn & 0xf;

	  /* Reserved cases. */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload. */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      /* 1010xnnn: pop r4-r[4+nnn], plus LR if x is set. */
      else if ((insn & 0xf0) == 0xa0)
	{
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well. */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      /* 10110000: "Finish" -- end of the instruction list. */
      else if (insn == 0xb0)
	{
	  /* We could only have updated PC by popping into it; if so, it
	     will show up as address.  Otherwise, copy LR into PC. */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done. */
	  break;
	}
      /* 10110001 0000iiii: pop r0-r3 under mask. */
      else if (insn == 0xb1)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare". */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask. */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      /* 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2). */
      else if (insn == 0xb2)
	{
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  /* Decode the ULEB128-encoded offset operand. */
	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      /* 10110011 sssscccc: pop D[ssss]-D[ssss+cccc] (FSTMFDX form). */
      else if (insn == 0xb3)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here. */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack. */
	  vsp += 4;
	}
      /* 10111nnn: pop D[8]-D[8+nnn] (FSTMFDX form). */
      else if ((insn & 0xf8) == 0xb8)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack. */
	  vsp += 4;
	}
      /* 11000110 sssscccc: pop iWMMXt WR[ssss]-WR[ssss+cccc]. */
      else if (insn == 0xc6)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid. */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      /* 11000111 0000iiii: pop iWMMXt WCGR0-WCGR3 under mask. */
      else if (insn == 0xc7)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare". */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      /* 11000nnn (n <= 5): pop iWMMXt WR[10]-WR[10+nnn]. */
      else if ((insn & 0xf8) == 0xc0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      /* 11001000 sssscccc: pop D[16+ssss]-D[16+ssss+cccc]. */
      else if (insn == 0xc8)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid. */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      /* 11001001 sssscccc: pop D[ssss]-D[ssss+cccc] (VPUSH form). */
      else if (insn == 0xc9)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      /* 11010nnn: pop D[8]-D[8+nnn] (VPUSH form). */
      else if ((insn & 0xf8) == 0xd0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count]. */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare". */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register. */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame. */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP. */
  cache->prev_sp = vsp;

  return cache;
}
2767
2768 /* Unwinding via ARM exception table entries. Note that the sniffer
2769 already computes a filled-in prologue cache, which is then used
2770 with the same arm_prologue_this_id and arm_prologue_prev_register
2771 routines also used for prologue-parsing based unwinding. */
2772
static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address. */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best. */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid. */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.
	 Recognize the syscall by re-reading the instruction just
	 before the current PC. */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid. */
      if (!exc_valid)
	return 0;

      /* The ARM exception index does not mark the *end* of the region
	 covered by the entry, and some functions will not have any entry.
	 To correctly recognize the end of the covered region, the linker
	 should have inserted dummy records with a CANTUNWIND marker.

	 Unfortunately, current versions of GNU ld do not reliably do
	 this, and thus we may have found an incorrect entry above.
	 As a (temporary) sanity check, we only use the entry if it
	 lies *within* the bounds of the function.  Note that this check
	 might reject perfectly valid entries that just happen to cover
	 multiple functions; therefore this check ought to be removed
	 once the linker is fixed. */
      if (func_start > exidx_region)
	return 0;
    }

  /* Decode the list of unwinding instructions into a prologue cache.
     Note that this may fail due to e.g. a "refuse to unwind" code. */
  cache = arm_exidx_fill_cache (this_frame, entry);
  if (!cache)
    return 0;

  *this_prologue_cache = cache;
  return 1;
}
2862
/* Unwinder driven by ARM exception table entries.  The sniffer fills
   in the prologue cache itself, so the this_id / prev_register methods
   are shared with the prologue-based unwinder above.  */

struct frame_unwind arm_exidx_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_exidx_unwind_sniffer
};
2871
2872 static struct arm_prologue_cache *
2873 arm_make_stub_cache (struct frame_info *this_frame)
2874 {
2875 struct arm_prologue_cache *cache;
2876
2877 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2878 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2879
2880 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2881
2882 return cache;
2883 }
2884
2885 /* Our frame ID for a stub frame is the current SP and LR. */
2886
2887 static void
2888 arm_stub_this_id (struct frame_info *this_frame,
2889 void **this_cache,
2890 struct frame_id *this_id)
2891 {
2892 struct arm_prologue_cache *cache;
2893
2894 if (*this_cache == NULL)
2895 *this_cache = arm_make_stub_cache (this_frame);
2896 cache = *this_cache;
2897
2898 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2899 }
2900
2901 static int
2902 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2903 struct frame_info *this_frame,
2904 void **this_prologue_cache)
2905 {
2906 CORE_ADDR addr_in_block;
2907 char dummy[4];
2908
2909 addr_in_block = get_frame_address_in_block (this_frame);
2910 if (in_plt_section (addr_in_block, NULL)
2911 /* We also use the stub winder if the target memory is unreadable
2912 to avoid having the prologue unwinder trying to read it. */
2913 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2914 return 1;
2915
2916 return 0;
2917 }
2918
/* Unwinder for PLT stubs and unreadable code regions; shares the
   prev_register method with the prologue unwinder.  */

struct frame_unwind arm_stub_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_stub_this_id,
  arm_prologue_prev_register,
  NULL,
  arm_stub_unwind_sniffer
};
2927
2928 static CORE_ADDR
2929 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
2930 {
2931 struct arm_prologue_cache *cache;
2932
2933 if (*this_cache == NULL)
2934 *this_cache = arm_make_prologue_cache (this_frame);
2935 cache = *this_cache;
2936
2937 return cache->prev_sp - cache->framesize;
2938 }
2939
/* Frame base table for normal frames; the same address serves as
   frame base, locals base and args base.  */

struct frame_base arm_normal_base = {
  &arm_prologue_unwind,
  arm_normal_frame_base,
  arm_normal_frame_base,
  arm_normal_frame_base
};
2946
2947 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
2948 dummy frame. The frame ID's base needs to match the TOS value
2949 saved by save_dummy_frame_tos() and returned from
2950 arm_push_dummy_call, and the PC needs to match the dummy frame's
2951 breakpoint. */
2952
2953 static struct frame_id
2954 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2955 {
2956 return frame_id_build (get_frame_register_unsigned (this_frame,
2957 ARM_SP_REGNUM),
2958 get_frame_pc (this_frame));
2959 }
2960
2961 /* Given THIS_FRAME, find the previous frame's resume PC (which will
2962 be used to construct the previous frame's ID, after looking up the
2963 containing function). */
2964
2965 static CORE_ADDR
2966 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
2967 {
2968 CORE_ADDR pc;
2969 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
2970 return arm_addr_bits_remove (gdbarch, pc);
2971 }
2972
2973 static CORE_ADDR
2974 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
2975 {
2976 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
2977 }
2978
2979 static struct value *
2980 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
2981 int regnum)
2982 {
2983 struct gdbarch * gdbarch = get_frame_arch (this_frame);
2984 CORE_ADDR lr, cpsr;
2985 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2986
2987 switch (regnum)
2988 {
2989 case ARM_PC_REGNUM:
2990 /* The PC is normally copied from the return column, which
2991 describes saves of LR. However, that version may have an
2992 extra bit set to indicate Thumb state. The bit is not
2993 part of the PC. */
2994 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2995 return frame_unwind_got_constant (this_frame, regnum,
2996 arm_addr_bits_remove (gdbarch, lr));
2997
2998 case ARM_PS_REGNUM:
2999 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3000 cpsr = get_frame_register_unsigned (this_frame, regnum);
3001 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3002 if (IS_THUMB_ADDR (lr))
3003 cpsr |= t_bit;
3004 else
3005 cpsr &= ~t_bit;
3006 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3007
3008 default:
3009 internal_error (__FILE__, __LINE__,
3010 _("Unexpected register %d"), regnum);
3011 }
3012 }
3013
3014 static void
3015 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3016 struct dwarf2_frame_state_reg *reg,
3017 struct frame_info *this_frame)
3018 {
3019 switch (regnum)
3020 {
3021 case ARM_PC_REGNUM:
3022 case ARM_PS_REGNUM:
3023 reg->how = DWARF2_FRAME_REG_FN;
3024 reg->loc.fn = arm_dwarf2_prev_register;
3025 break;
3026 case ARM_SP_REGNUM:
3027 reg->how = DWARF2_FRAME_REG_CFA;
3028 break;
3029 }
3030 }
3031
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.
   Thumb/Thumb-2 version; the ARM-mode variant is below.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  /* Forward scan: every instruction from PC to the first return must be
     one of the recognized epilogue forms, else we bail out.  */
  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if (thumb_insn_size (insn) == 4)  /* 32-bit Thumb-2 instruction */
	{
	  /* Fetch the second halfword of the 32-bit encoding.  */
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN is the halfword at PC - 4 (only meaningful if the previous
	 instruction was a 32-bit encoding); INSN2 is the halfword at
	 PC - 2, i.e. the candidate 16-bit instruction just before PC.  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}
3156
3157 /* Return true if we are in the function's epilogue, i.e. after the
3158 instruction that destroyed the function's stack frame. */
3159
3160 static int
3161 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3162 {
3163 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3164 unsigned int insn;
3165 int found_return, found_stack_adjust;
3166 CORE_ADDR func_start, func_end;
3167
3168 if (arm_pc_is_thumb (gdbarch, pc))
3169 return thumb_in_function_epilogue_p (gdbarch, pc);
3170
3171 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3172 return 0;
3173
3174 /* We are in the epilogue if the previous instruction was a stack
3175 adjustment and the next instruction is a possible return (bx, mov
3176 pc, or pop). We could have to scan backwards to find the stack
3177 adjustment, or forwards to find the return, but this is a decent
3178 approximation. First scan forwards. */
3179
3180 found_return = 0;
3181 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3182 if (bits (insn, 28, 31) != INST_NV)
3183 {
3184 if ((insn & 0x0ffffff0) == 0x012fff10)
3185 /* BX. */
3186 found_return = 1;
3187 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3188 /* MOV PC. */
3189 found_return = 1;
3190 else if ((insn & 0x0fff0000) == 0x08bd0000
3191 && (insn & 0x0000c000) != 0)
3192 /* POP (LDMIA), including PC or LR. */
3193 found_return = 1;
3194 }
3195
3196 if (!found_return)
3197 return 0;
3198
3199 /* Scan backwards. This is just a heuristic, so do not worry about
3200 false positives from mode changes. */
3201
3202 if (pc < func_start + 4)
3203 return 0;
3204
3205 found_stack_adjust = 0;
3206 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3207 if (bits (insn, 28, 31) != INST_NV)
3208 {
3209 if ((insn & 0x0df0f000) == 0x0080d000)
3210 /* ADD SP (register or immediate). */
3211 found_stack_adjust = 1;
3212 else if ((insn & 0x0df0f000) == 0x0040d000)
3213 /* SUB SP (register or immediate). */
3214 found_stack_adjust = 1;
3215 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3216 /* MOV SP. */
3217 found_stack_adjust = 1;
3218 else if ((insn & 0x0fff0000) == 0x08bd0000)
3219 /* POP (LDMIA). */
3220 found_stack_adjust = 1;
3221 else if ((insn & 0x0fff0000) == 0x049d0000)
3222 /* POP of a single register. */
3223 found_stack_adjust = 1;
3224 }
3225
3226 if (found_stack_adjust)
3227 return 1;
3228
3229 return 0;
3230 }
3231
3232
3233 /* When arguments must be pushed onto the stack, they go on in reverse
3234 order. The code below implements a FILO (stack) to do this. */
3235
/* One pending chunk of argument data (or padding) to be written below
   SP.  Items form a singly linked list used as a LIFO stack, so
   pushing arguments in order and later popping them writes them out
   in reverse.  */
struct stack_item
{
  int len;			/* Number of bytes in DATA.  */
  struct stack_item *prev;	/* Next-older item, or NULL at the bottom.  */
  void *data;			/* Heap-allocated copy of the bytes.  */
};
3242
3243 static struct stack_item *
3244 push_stack_item (struct stack_item *prev, const void *contents, int len)
3245 {
3246 struct stack_item *si;
3247 si = xmalloc (sizeof (struct stack_item));
3248 si->data = xmalloc (len);
3249 si->len = len;
3250 si->prev = prev;
3251 memcpy (si->data, contents, len);
3252 return si;
3253 }
3254
3255 static struct stack_item *
3256 pop_stack_item (struct stack_item *si)
3257 {
3258 struct stack_item *dead = si;
3259 si = si->prev;
3260 xfree (dead->data);
3261 xfree (dead);
3262 return si;
3263 }
3264
3265
3266 /* Return the alignment (in bytes) of the given type. */
3267
3268 static int
3269 arm_type_align (struct type *t)
3270 {
3271 int n;
3272 int align;
3273 int falign;
3274
3275 t = check_typedef (t);
3276 switch (TYPE_CODE (t))
3277 {
3278 default:
3279 /* Should never happen. */
3280 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3281 return 4;
3282
3283 case TYPE_CODE_PTR:
3284 case TYPE_CODE_ENUM:
3285 case TYPE_CODE_INT:
3286 case TYPE_CODE_FLT:
3287 case TYPE_CODE_SET:
3288 case TYPE_CODE_RANGE:
3289 case TYPE_CODE_REF:
3290 case TYPE_CODE_CHAR:
3291 case TYPE_CODE_BOOL:
3292 return TYPE_LENGTH (t);
3293
3294 case TYPE_CODE_ARRAY:
3295 case TYPE_CODE_COMPLEX:
3296 /* TODO: What about vector types? */
3297 return arm_type_align (TYPE_TARGET_TYPE (t));
3298
3299 case TYPE_CODE_STRUCT:
3300 case TYPE_CODE_UNION:
3301 align = 1;
3302 for (n = 0; n < TYPE_NFIELDS (t); n++)
3303 {
3304 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3305 if (falign > align)
3306 align = falign;
3307 }
3308 return align;
3309 }
3310 }
3311
3312 /* Possible base types for a candidate for passing and returning in
3313 VFP registers. */
3314
enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,	/* Not (yet) classified.  */
  VFP_CPRC_SINGLE,	/* Single-precision float; 4-byte units.  */
  VFP_CPRC_DOUBLE,	/* Double-precision float; 8-byte units.  */
  VFP_CPRC_VEC64,	/* 64-bit vector; 8-byte units.  */
  VFP_CPRC_VEC128	/* 128-bit vector; 16-byte units.  */
};
3323
3324 /* The length of one element of base type B. */
3325
3326 static unsigned
3327 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3328 {
3329 switch (b)
3330 {
3331 case VFP_CPRC_SINGLE:
3332 return 4;
3333 case VFP_CPRC_DOUBLE:
3334 return 8;
3335 case VFP_CPRC_VEC64:
3336 return 8;
3337 case VFP_CPRC_VEC128:
3338 return 16;
3339 default:
3340 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3341 (int) b);
3342 }
3343 }
3344
3345 /* The character ('s', 'd' or 'q') for the type of VFP register used
3346 for passing base type B. */
3347
3348 static int
3349 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3350 {
3351 switch (b)
3352 {
3353 case VFP_CPRC_SINGLE:
3354 return 's';
3355 case VFP_CPRC_DOUBLE:
3356 return 'd';
3357 case VFP_CPRC_VEC64:
3358 return 'd';
3359 case VFP_CPRC_VEC128:
3360 return 'q';
3361 default:
3362 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3363 (int) b);
3364 }
3365 }
3366
3367 /* Determine whether T may be part of a candidate for passing and
3368 returning in VFP registers, ignoring the limit on the total number
3369 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3370 classification of the first valid component found; if it is not
3371 VFP_CPRC_UNKNOWN, all components must have the same classification
3372 as *BASE_TYPE. If it is found that T contains a type not permitted
3373 for passing and returning in VFP registers, a type differently
3374 classified from *BASE_TYPE, or two types differently classified
3375 from each other, return -1, otherwise return the total number of
3376 base-type elements found (possibly 0 in an empty structure or
3377 array). Vectors and complex types are not currently supported,
3378 matching the generic AAPCS support. */
3379
static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A float is one element; only 4-byte (single) and 8-byte
	 (double) floats qualify, and they must match any previously
	 established base type.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	/* Classify the element type once; the array then contributes
	   as many base-type elements as fit in its total length.  */
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    /* Zero-length array: contributes no elements.  */
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* A struct contributes the sum of its fields' elements, and
	   must be exactly the packed size (no padding allowed).  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* A union contributes the maximum of its members' elements,
	   and likewise must have no slack space.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}
3487
3488 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3489 if passed to or returned from a non-variadic function with the VFP
3490 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3491 *BASE_TYPE to the base type for T and *COUNT to the number of
3492 elements of that base type before returning. */
3493
3494 static int
3495 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3496 int *count)
3497 {
3498 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3499 int c = arm_vfp_cprc_sub_candidate (t, &b);
3500 if (c <= 0 || c > 4)
3501 return 0;
3502 *base_type = b;
3503 *count = c;
3504 return 1;
3505 }
3506
3507 /* Return 1 if the VFP ABI should be used for passing arguments to and
3508 returning values from a function of type FUNC_TYPE, 0
3509 otherwise. */
3510
3511 static int
3512 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3513 {
3514 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3515 /* Variadic functions always use the base ABI. Assume that functions
3516 without debug info are not variadic. */
3517 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3518 return 0;
3519 /* The VFP ABI is only supported as a variant of AAPCS. */
3520 if (tdep->arm_abi != ARM_ABI_AAPCS)
3521 return 0;
3522 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3523 }
3524
3525 /* We currently only support passing parameters in integer registers, which
3526 conforms with GCC's default model, and VFP argument passing following
3527 the VFP variant of AAPCS. Several other variants exist and
3528 we should probably support some of them based on the selected ABI. */
3529
3530 static CORE_ADDR
3531 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3532 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3533 struct value **args, CORE_ADDR sp, int struct_return,
3534 CORE_ADDR struct_addr)
3535 {
3536 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3537 int argnum;
3538 int argreg;
3539 int nstack;
3540 struct stack_item *si = NULL;
3541 int use_vfp_abi;
3542 struct type *ftype;
3543 unsigned vfp_regs_free = (1 << 16) - 1;
3544
3545 /* Determine the type of this function and whether the VFP ABI
3546 applies. */
3547 ftype = check_typedef (value_type (function));
3548 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3549 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3550 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3551
3552 /* Set the return address. For the ARM, the return breakpoint is
3553 always at BP_ADDR. */
3554 if (arm_pc_is_thumb (gdbarch, bp_addr))
3555 bp_addr |= 1;
3556 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3557
3558 /* Walk through the list of args and determine how large a temporary
3559 stack is required. Need to take care here as structs may be
3560 passed on the stack, and we have to push them. */
3561 nstack = 0;
3562
3563 argreg = ARM_A1_REGNUM;
3564 nstack = 0;
3565
3566 /* The struct_return pointer occupies the first parameter
3567 passing register. */
3568 if (struct_return)
3569 {
3570 if (arm_debug)
3571 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3572 gdbarch_register_name (gdbarch, argreg),
3573 paddress (gdbarch, struct_addr));
3574 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3575 argreg++;
3576 }
3577
3578 for (argnum = 0; argnum < nargs; argnum++)
3579 {
3580 int len;
3581 struct type *arg_type;
3582 struct type *target_type;
3583 enum type_code typecode;
3584 const bfd_byte *val;
3585 int align;
3586 enum arm_vfp_cprc_base_type vfp_base_type;
3587 int vfp_base_count;
3588 int may_use_core_reg = 1;
3589
3590 arg_type = check_typedef (value_type (args[argnum]));
3591 len = TYPE_LENGTH (arg_type);
3592 target_type = TYPE_TARGET_TYPE (arg_type);
3593 typecode = TYPE_CODE (arg_type);
3594 val = value_contents (args[argnum]);
3595
3596 align = arm_type_align (arg_type);
3597 /* Round alignment up to a whole number of words. */
3598 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3599 /* Different ABIs have different maximum alignments. */
3600 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3601 {
3602 /* The APCS ABI only requires word alignment. */
3603 align = INT_REGISTER_SIZE;
3604 }
3605 else
3606 {
3607 /* The AAPCS requires at most doubleword alignment. */
3608 if (align > INT_REGISTER_SIZE * 2)
3609 align = INT_REGISTER_SIZE * 2;
3610 }
3611
3612 if (use_vfp_abi
3613 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3614 &vfp_base_count))
3615 {
3616 int regno;
3617 int unit_length;
3618 int shift;
3619 unsigned mask;
3620
3621 /* Because this is a CPRC it cannot go in a core register or
3622 cause a core register to be skipped for alignment.
3623 Either it goes in VFP registers and the rest of this loop
3624 iteration is skipped for this argument, or it goes on the
3625 stack (and the stack alignment code is correct for this
3626 case). */
3627 may_use_core_reg = 0;
3628
3629 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3630 shift = unit_length / 4;
3631 mask = (1 << (shift * vfp_base_count)) - 1;
3632 for (regno = 0; regno < 16; regno += shift)
3633 if (((vfp_regs_free >> regno) & mask) == mask)
3634 break;
3635
3636 if (regno < 16)
3637 {
3638 int reg_char;
3639 int reg_scaled;
3640 int i;
3641
3642 vfp_regs_free &= ~(mask << regno);
3643 reg_scaled = regno / shift;
3644 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3645 for (i = 0; i < vfp_base_count; i++)
3646 {
3647 char name_buf[4];
3648 int regnum;
3649 if (reg_char == 'q')
3650 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3651 val + i * unit_length);
3652 else
3653 {
3654 sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
3655 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3656 strlen (name_buf));
3657 regcache_cooked_write (regcache, regnum,
3658 val + i * unit_length);
3659 }
3660 }
3661 continue;
3662 }
3663 else
3664 {
3665 /* This CPRC could not go in VFP registers, so all VFP
3666 registers are now marked as used. */
3667 vfp_regs_free = 0;
3668 }
3669 }
3670
3671 /* Push stack padding for dowubleword alignment. */
3672 if (nstack & (align - 1))
3673 {
3674 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3675 nstack += INT_REGISTER_SIZE;
3676 }
3677
3678 /* Doubleword aligned quantities must go in even register pairs. */
3679 if (may_use_core_reg
3680 && argreg <= ARM_LAST_ARG_REGNUM
3681 && align > INT_REGISTER_SIZE
3682 && argreg & 1)
3683 argreg++;
3684
3685 /* If the argument is a pointer to a function, and it is a
3686 Thumb function, create a LOCAL copy of the value and set
3687 the THUMB bit in it. */
3688 if (TYPE_CODE_PTR == typecode
3689 && target_type != NULL
3690 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3691 {
3692 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3693 if (arm_pc_is_thumb (gdbarch, regval))
3694 {
3695 bfd_byte *copy = alloca (len);
3696 store_unsigned_integer (copy, len, byte_order,
3697 MAKE_THUMB_ADDR (regval));
3698 val = copy;
3699 }
3700 }
3701
3702 /* Copy the argument to general registers or the stack in
3703 register-sized pieces. Large arguments are split between
3704 registers and stack. */
3705 while (len > 0)
3706 {
3707 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3708
3709 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3710 {
3711 /* The argument is being passed in a general purpose
3712 register. */
3713 CORE_ADDR regval
3714 = extract_unsigned_integer (val, partial_len, byte_order);
3715 if (byte_order == BFD_ENDIAN_BIG)
3716 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3717 if (arm_debug)
3718 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3719 argnum,
3720 gdbarch_register_name
3721 (gdbarch, argreg),
3722 phex (regval, INT_REGISTER_SIZE));
3723 regcache_cooked_write_unsigned (regcache, argreg, regval);
3724 argreg++;
3725 }
3726 else
3727 {
3728 /* Push the arguments onto the stack. */
3729 if (arm_debug)
3730 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3731 argnum, nstack);
3732 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3733 nstack += INT_REGISTER_SIZE;
3734 }
3735
3736 len -= partial_len;
3737 val += partial_len;
3738 }
3739 }
3740 /* If we have an odd number of words to push, then decrement the stack
3741 by one word now, so first stack argument will be dword aligned. */
3742 if (nstack & 4)
3743 sp -= 4;
3744
3745 while (si)
3746 {
3747 sp -= si->len;
3748 write_memory (sp, si->data, si->len);
3749 si = pop_stack_item (si);
3750 }
3751
3752 /* Finally, update teh SP register. */
3753 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3754
3755 return sp;
3756 }
3757
3758
3759 /* Always align the frame to an 8-byte boundary. This is required on
3760 some platforms and harmless on the rest. */
3761
3762 static CORE_ADDR
3763 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3764 {
3765 /* Align the stack to eight bytes. */
3766 return sp & ~ (CORE_ADDR) 7;
3767 }
3768
/* Print the names of the FPU exception flags set in the low five bits
   of FLAGS, followed by a newline.  */

static void
print_fpu_flags (int flags)
{
  static const char *const flag_names[]
    = { "IVO ", "DVZ ", "OFL ", "UFL ", "INX " };
  int i;

  for (i = 0; i < 5; i++)
    if (flags & (1 << i))
      fputs (flag_names[i], stdout);
  putchar ('\n');
}
3784
/* Print interesting information about the floating point processor
   (if present) or emulator.  */
static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  /* Bits 24-30 of the FPS register hold the FPU type; bit 31
     distinguishes hardware from software floating point.  */
  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* NOTE(review): output goes to stdout via printf/fputs and ignores
     the FILE parameter; confirm whether it should use the ui_file.  */
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}
3806
3807 /* Construct the ARM extended floating point type. */
3808 static struct type *
3809 arm_ext_type (struct gdbarch *gdbarch)
3810 {
3811 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3812
3813 if (!tdep->arm_ext_type)
3814 tdep->arm_ext_type
3815 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3816 floatformats_arm_ext);
3817
3818 return tdep->arm_ext_type;
3819 }
3820
3821 static struct type *
3822 arm_neon_double_type (struct gdbarch *gdbarch)
3823 {
3824 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3825
3826 if (tdep->neon_double_type == NULL)
3827 {
3828 struct type *t, *elem;
3829
3830 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3831 TYPE_CODE_UNION);
3832 elem = builtin_type (gdbarch)->builtin_uint8;
3833 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3834 elem = builtin_type (gdbarch)->builtin_uint16;
3835 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3836 elem = builtin_type (gdbarch)->builtin_uint32;
3837 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3838 elem = builtin_type (gdbarch)->builtin_uint64;
3839 append_composite_type_field (t, "u64", elem);
3840 elem = builtin_type (gdbarch)->builtin_float;
3841 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3842 elem = builtin_type (gdbarch)->builtin_double;
3843 append_composite_type_field (t, "f64", elem);
3844
3845 TYPE_VECTOR (t) = 1;
3846 TYPE_NAME (t) = "neon_d";
3847 tdep->neon_double_type = t;
3848 }
3849
3850 return tdep->neon_double_type;
3851 }
3852
3853 /* FIXME: The vector types are not correctly ordered on big-endian
3854 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3855 bits of d0 - regardless of what unit size is being held in d0. So
3856 the offset of the first uint8 in d0 is 7, but the offset of the
3857 first float is 4. This code works as-is for little-endian
3858 targets. */
3859
3860 static struct type *
3861 arm_neon_quad_type (struct gdbarch *gdbarch)
3862 {
3863 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3864
3865 if (tdep->neon_quad_type == NULL)
3866 {
3867 struct type *t, *elem;
3868
3869 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3870 TYPE_CODE_UNION);
3871 elem = builtin_type (gdbarch)->builtin_uint8;
3872 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3873 elem = builtin_type (gdbarch)->builtin_uint16;
3874 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3875 elem = builtin_type (gdbarch)->builtin_uint32;
3876 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3877 elem = builtin_type (gdbarch)->builtin_uint64;
3878 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
3879 elem = builtin_type (gdbarch)->builtin_float;
3880 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
3881 elem = builtin_type (gdbarch)->builtin_double;
3882 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
3883
3884 TYPE_VECTOR (t) = 1;
3885 TYPE_NAME (t) = "neon_q";
3886 tdep->neon_quad_type = t;
3887 }
3888
3889 return tdep->neon_quad_type;
3890 }
3891
/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* The first 32 pseudo registers after the raw set are the VFP
     single-precision views (presumably s0-s31 — confirm against the
     pseudo-register layout).  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* The next 16 pseudo registers are the NEON quad views.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      /* FPA register slots only carry a real type when the FPA is
	 actually present.  */
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}
3941
3942 /* Map a DWARF register REGNUM onto the appropriate GDB register
3943 number. */
3944
3945 static int
3946 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
3947 {
3948 /* Core integer regs. */
3949 if (reg >= 0 && reg <= 15)
3950 return reg;
3951
3952 /* Legacy FPA encoding. These were once used in a way which
3953 overlapped with VFP register numbering, so their use is
3954 discouraged, but GDB doesn't support the ARM toolchain
3955 which used them for VFP. */
3956 if (reg >= 16 && reg <= 23)
3957 return ARM_F0_REGNUM + reg - 16;
3958
3959 /* New assignments for the FPA registers. */
3960 if (reg >= 96 && reg <= 103)
3961 return ARM_F0_REGNUM + reg - 96;
3962
3963 /* WMMX register assignments. */
3964 if (reg >= 104 && reg <= 111)
3965 return ARM_WCGR0_REGNUM + reg - 104;
3966
3967 if (reg >= 112 && reg <= 127)
3968 return ARM_WR0_REGNUM + reg - 112;
3969
3970 if (reg >= 192 && reg <= 199)
3971 return ARM_WC0_REGNUM + reg - 192;
3972
3973 /* VFP v2 registers. A double precision value is actually
3974 in d1 rather than s2, but the ABI only defines numbering
3975 for the single precision registers. This will "just work"
3976 in GDB for little endian targets (we'll read eight bytes,
3977 starting in s0 and then progressing to s1), but will be
3978 reversed on big endian targets with VFP. This won't
3979 be a problem for the new Neon quad registers; you're supposed
3980 to use DW_OP_piece for those. */
3981 if (reg >= 64 && reg <= 95)
3982 {
3983 char name_buf[4];
3984
3985 sprintf (name_buf, "s%d", reg - 64);
3986 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3987 strlen (name_buf));
3988 }
3989
3990 /* VFP v3 / Neon registers. This range is also used for VFP v2
3991 registers, except that it now describes d0 instead of s0. */
3992 if (reg >= 256 && reg <= 287)
3993 {
3994 char name_buf[4];
3995
3996 sprintf (name_buf, "d%d", reg - 256);
3997 return user_reg_map_name_to_regnum (gdbarch, name_buf,
3998 strlen (name_buf));
3999 }
4000
4001 return -1;
4002 }
4003
4004 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4005 static int
4006 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4007 {
4008 int reg = regnum;
4009 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4010
4011 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4012 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4013
4014 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4015 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4016
4017 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4018 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4019
4020 if (reg < NUM_GREGS)
4021 return SIM_ARM_R0_REGNUM + reg;
4022 reg -= NUM_GREGS;
4023
4024 if (reg < NUM_FREGS)
4025 return SIM_ARM_FP0_REGNUM + reg;
4026 reg -= NUM_FREGS;
4027
4028 if (reg < NUM_SREGS)
4029 return SIM_ARM_FPS_REGNUM + reg;
4030 reg -= NUM_SREGS;
4031
4032 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4033 }
4034
4035 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4036 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4037 It is thought that this is is the floating-point register format on
4038 little-endian systems. */
4039
4040 static void
4041 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4042 void *dbl, int endianess)
4043 {
4044 DOUBLEST d;
4045
4046 if (endianess == BFD_ENDIAN_BIG)
4047 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4048 else
4049 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4050 ptr, &d);
4051 floatformat_from_doublest (fmt, &d, dbl);
4052 }
4053
4054 static void
4055 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4056 int endianess)
4057 {
4058 DOUBLEST d;
4059
4060 floatformat_to_doublest (fmt, ptr, &d);
4061 if (endianess == BFD_ENDIAN_BIG)
4062 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4063 else
4064 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4065 &d, dbl);
4066 }
4067
4068 static int
4069 condition_true (unsigned long cond, unsigned long status_reg)
4070 {
4071 if (cond == INST_AL || cond == INST_NV)
4072 return 1;
4073
4074 switch (cond)
4075 {
4076 case INST_EQ:
4077 return ((status_reg & FLAG_Z) != 0);
4078 case INST_NE:
4079 return ((status_reg & FLAG_Z) == 0);
4080 case INST_CS:
4081 return ((status_reg & FLAG_C) != 0);
4082 case INST_CC:
4083 return ((status_reg & FLAG_C) == 0);
4084 case INST_MI:
4085 return ((status_reg & FLAG_N) != 0);
4086 case INST_PL:
4087 return ((status_reg & FLAG_N) == 0);
4088 case INST_VS:
4089 return ((status_reg & FLAG_V) != 0);
4090 case INST_VC:
4091 return ((status_reg & FLAG_V) == 0);
4092 case INST_HI:
4093 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4094 case INST_LS:
4095 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4096 case INST_GE:
4097 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4098 case INST_LT:
4099 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4100 case INST_GT:
4101 return (((status_reg & FLAG_Z) == 0)
4102 && (((status_reg & FLAG_N) == 0)
4103 == ((status_reg & FLAG_V) == 0)));
4104 case INST_LE:
4105 return (((status_reg & FLAG_Z) != 0)
4106 || (((status_reg & FLAG_N) == 0)
4107 != ((status_reg & FLAG_V) == 0)));
4108 }
4109 return 1;
4110 }
4111
/* Evaluate the "operand 2" shifted-register form of an ARM data
   processing instruction INST in FRAME: decode the shift amount
   (immediate or register), fetch the value of register Rm, apply the
   shift, and return the 32-bit result.  PC_VAL is the raw PC and
   CARRY the current C flag, both needed for the PC-relative and RRX
   special cases.  STATUS_REG is currently unused here.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift: amount is the low byte of Rs.
	 Reading Rs == PC yields PC + 8 (prefetch offset).  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
	       : get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    /* Immediate shift amount, bits 7-11 of the instruction.  */
    shift = bits (inst, 7, 11);

  /* Reading Rm == PC yields PC + 12 for register-specified shifts,
     PC + 8 otherwise.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      /* Shifts of 32 or more replicate the sign bit; clamping to 31
	 and sign-extending manually keeps this correct even when
	 unsigned long is wider than 32 bits.  */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	/* Encoded rotate of 0 means RRX: shift in the carry flag.  */
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  /* Mask to 32 bits in case unsigned long is wider.  */
  return res & 0xffffffff;
}
4161
/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int count = 0;

  /* Examine each bit in turn, accumulating the set ones.  */
  while (val != 0)
    {
      count += val & 1;
      val >>= 1;
    }

  return count;
}
4172
/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.  */

static int
thumb_insn_size (unsigned short inst1)
{
  /* A first halfword of 0b11101..., 0b11110... or 0b11111... starts
     a 32-bit Thumb-2 instruction; every other prefix is 16-bit.
     Those three prefixes are exactly the values 0xe800 and above.  */
  return ((inst1 & 0xf800) >= 0xe800) ? 4 : 2;
}
4184
/* Advance the IT-block state ITSTATE past one instruction and return
   the new state; return zero once the block is exhausted.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  /* Keep IT[7:5] (the first three bits of the base condition) and
     shift the per-instruction condition/count bits left by one.  */
  unsigned int next = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* A clear low nibble means the IT block has ended.  */
  return (next & 0x0f) != 0 ? next : 0;
}
4198
/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;		/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  /* Returned addresses carry the Thumb bit so the caller knows the
     execution state at the destination.  */
  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip any leading instructions whose condition fails.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* We are in a conditional block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	/* Advance to the next instruction.  All the 32-bit
	   instructions share a common prefix.  */
	return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)  /* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* The OS-specific code may know where execution resumes
	     after a system call.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      /* Reconstruct the 25-bit branch offset; J1/J2 are
		 combined with the sign bit in inverted form.  */
	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    load_pc = 0;

	  if (load_pc)
	    {
	      /* Fetch the new PC from the memory slot the decode
		 above selected.  */
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative load: base is the word-aligned PC.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      if (bit (inst2, 10))
		{
		  /* Pre-indexed addressing: adjust base by imm8
		     before the load.  */
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset, optionally shifted.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Table of byte offsets, each scaled by two.  */
	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;  /* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  /* Table of halfword offsets, each scaled by two.  */
	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = UNMAKE_THUMB_ADDR (pc_val);
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      /* Bit 11 distinguishes CBNZ (taken when REG != 0) from CBZ
	 (taken when REG == 0).  */
      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }
  return nextpc;
}
4570
/* Get the raw next address.  PC is the current program counter, in
   FRAME, which is assumed to be executing in ARM mode.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  /* Condition field 0b1111 selects the unconditional instruction
     space; handle it separately from regular condition codes.  */
  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:			/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions whose destination is the PC can
	       change the flow of control.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Multiply into PC.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    /* Reading Rn == PC yields PC + 8 (prefetch offset).  */
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Immediate operand: 8-bit value rotated right by
		   twice the 4-bit rotate field.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    /* Emulate the ALU operation to obtain the value the PC
	       would take.  */
	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison instructions do not write Rd; the PC is
		   unaffected.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* byte write to PC */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
							      4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;
		  unsigned long rn_val
		    = get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      /* PC is loaded last, above the other listed
			 registers.  */
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  nextpc =
		    (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
							      (rn_val + offset),
							      4, byte_order);
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* The OS-specific code may know where execution resumes
	       after a system call.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}
4840
4841 /* Determine next PC after current instruction executes. Will call either
4842 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4843 loop is detected. */
4844
4845 CORE_ADDR
4846 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4847 {
4848 CORE_ADDR nextpc;
4849
4850 if (arm_frame_is_thumb (frame))
4851 {
4852 nextpc = thumb_get_next_pc_raw (frame, pc);
4853 if (nextpc == MAKE_THUMB_ADDR (pc))
4854 error (_("Infinite loop detected"));
4855 }
4856 else
4857 {
4858 nextpc = arm_get_next_pc_raw (frame, pc);
4859 if (nextpc == pc)
4860 error (_("Infinite loop detected"));
4861 }
4862
4863 return nextpc;
4864 }
4865
4866 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4867 of the appropriate mode (as encoded in the PC value), even if this
4868 differs from what would be expected according to the symbol tables. */
4869
4870 void
4871 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4872 struct address_space *aspace,
4873 CORE_ADDR pc)
4874 {
4875 struct cleanup *old_chain
4876 = make_cleanup_restore_integer (&arm_override_mode);
4877
4878 arm_override_mode = IS_THUMB_ADDR (pc);
4879 pc = gdbarch_addr_bits_remove (gdbarch, pc);
4880
4881 insert_single_step_breakpoint (gdbarch, aspace, pc);
4882
4883 do_cleanups (old_chain);
4884 }
4885
/* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
   instruction and ending with a STREX{,B,H,D} instruction.  If such a sequence
   is found, attempt to step through it.  A breakpoint is placed at the end of
   the sequence.  Returns non-zero if the sequence was handled, zero to
   fall back to normal single-stepping.  */

static int
thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned short insn1, insn2;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  ULONGEST status, itstate;

  /* We currently do not support atomic sequences within an IT block.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
  if (itstate & 0x0f)
    return 0;

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.  */
  insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  if (thumb_insn_size (insn1) != 4)
    return 0;

  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
  loc += 2;
  /* First test matches LDREX{,B,H}; second matches LDREXD.  */
  if (!((insn1 & 0xfff0) == 0xe850
	|| ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
      loc += 2;

      if (thumb_insn_size (insn1) != 4)
	{
	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
	    {
	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb_instruction_changes_pc (insn1))
	    return 0;
	}
      else
	{
	  /* 32-bit instruction: fetch the second halfword.  */
	  insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
	  loc += 2;

	  /* Assume that there is at most one conditional branch in the
	     atomic sequence.  If a conditional branch is found, put a
	     breakpoint in its destination address.  */
	  if ((insn1 & 0xf800) == 0xf000
	      && (insn2 & 0xd000) == 0x8000
	      && (insn1 & 0x0380) != 0x0380)
	    {
	      int sign, j1, j2, imm1, imm2;
	      unsigned int offset;

	      sign = sbits (insn1, 10, 10);
	      imm1 = bits (insn1, 0, 5);
	      imm2 = bits (insn2, 0, 10);
	      j1 = bit (insn2, 13);
	      j2 = bit (insn2, 11);

	      /* Reassemble the Thumb-2 conditional-branch offset.  */
	      offset = (sign << 20) + (j2 << 19) + (j1 << 18);
	      offset += (imm1 << 12) + (imm2 << 1);

	      if (last_breakpoint > 0)
		return 0; /* More than one conditional branch found,
			     fallback to the standard code.  */

	      breaks[1] = loc + offset;
	      last_breakpoint++;
	    }

	  /* We do not support atomic sequences that use any *other*
	     instructions but conditional branches to change the PC.
	     Fall back to standard code to avoid losing control of
	     execution.  */
	  else if (thumb2_instruction_changes_pc (insn1, insn2))
	    return 0;

	  /* If we find a strex{,b,h,d}, we're done.  */
	  if ((insn1 & 0xfff0) == 0xe840
	      || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
	    break;
	}
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace,
				       MAKE_THUMB_ADDR (breaks[index]));

  return 1;
}
5021
/* ARM-mode counterpart of thumb_deal_with_atomic_sequence_raw: scan
   forward from the current PC for a LDREX{,B,H,D} ... STREX{,B,H,D}
   sequence and, if one is found, place breakpoints after it (and at
   the destination of at most one conditional branch within it).
   Returns non-zero if the sequence was handled, zero to fall back to
   normal single-stepping.  */

static int
arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = {-1, -1};
  CORE_ADDR loc = pc;
  unsigned int insn;
  int insn_count;
  int index;
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */

  /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
     Note that we do not currently support conditionally executed atomic
     instructions.  */
  insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
  loc += 4;
  if ((insn & 0xff9000f0) != 0xe1900090)
    return 0;

  /* Assume that no atomic sequence is longer than "atomic_sequence_length"
     instructions.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
      loc += 4;

      /* Assume that there is at most one conditional branch in the atomic
	 sequence.  If a conditional branch is found, put a breakpoint in
	 its destination address.  */
      if (bits (insn, 24, 27) == 0xa)
	{
	  if (last_breakpoint > 0)
	    return 0; /* More than one conditional branch found, fallback
			 to the standard single-step code.  */

	  breaks[1] = BranchDest (loc - 4, insn);
	  last_breakpoint++;
	}

      /* We do not support atomic sequences that use any *other* instructions
	 but conditional branches to change the PC.  Fall back to standard
	 code to avoid losing control of execution.  */
      else if (arm_instruction_changes_pc (insn))
	return 0;

      /* If we find a strex{,b,h,d}, we're done.  */
      if ((insn & 0xff9000f0) == 0xe1800090)
	break;
    }

  /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence.  */
  if (insn_count == atomic_sequence_length)
    return 0;

  /* Insert a breakpoint right after the end of the atomic sequence.  */
  breaks[0] = loc;

  /* Check for duplicated breakpoints.  Check also for a breakpoint
     placed (branch instruction's destination) anywhere in sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] < loc)))
    last_breakpoint = 0;

  /* Effectively inserts the breakpoints.  */
  for (index = 0; index <= last_breakpoint; index++)
    arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
5096
/* Dispatch atomic-sequence handling to the decoder matching the
   current execution state of FRAME.  Returns non-zero if an atomic
   sequence was stepped through.  */

int
arm_deal_with_atomic_sequence (struct frame_info *frame)
{
  if (!arm_frame_is_thumb (frame))
    return arm_deal_with_atomic_sequence_raw (frame);

  return thumb_deal_with_atomic_sequence_raw (frame);
}
5105
5106 /* single_step() is called just before we want to resume the inferior,
5107 if we want to single-step it but there is no hardware or kernel
5108 single-step support. We find the target of the coming instruction
5109 and breakpoint it. */
5110
5111 int
5112 arm_software_single_step (struct frame_info *frame)
5113 {
5114 struct gdbarch *gdbarch = get_frame_arch (frame);
5115 struct address_space *aspace = get_frame_address_space (frame);
5116 CORE_ADDR next_pc;
5117
5118 if (arm_deal_with_atomic_sequence (frame))
5119 return 1;
5120
5121 next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
5122 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
5123
5124 return 1;
5125 }
5126
5127 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5128 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5129 NULL if an error occurs. BUF is freed. */
5130
5131 static gdb_byte *
5132 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5133 int old_len, int new_len)
5134 {
5135 gdb_byte *new_buf;
5136 int bytes_to_read = new_len - old_len;
5137
5138 new_buf = xmalloc (new_len);
5139 memcpy (new_buf + bytes_to_read, buf, old_len);
5140 xfree (buf);
5141 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5142 {
5143 xfree (new_buf);
5144 return NULL;
5145 }
5146 return new_buf;
5147 }
5148
5149 /* An IT block is at most the 2-byte IT instruction followed by
5150 four 4-byte instructions. The furthest back we must search to
5151 find an IT block that affects the current instruction is thus
5152 2 + 3 * 4 == 14 bytes. */
5153 #define MAX_IT_BLOCK_PREFIX 14
5154
5155 /* Use a quick scan if there are more than this many bytes of
5156 code. */
5157 #define IT_SCAN_THRESHOLD 32
5158
5159 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5160 A breakpoint in an IT block may not be hit, depending on the
5161 condition flags. */
5162 static CORE_ADDR
5163 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5164 {
5165 gdb_byte *buf;
5166 char map_type;
5167 CORE_ADDR boundary, func_start;
5168 int buf_len;
5169 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5170 int i, any, last_it, last_it_count;
5171
5172 /* If we are using BKPT breakpoints, none of this is necessary. */
5173 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5174 return bpaddr;
5175
5176 /* ARM mode does not have this problem. */
5177 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5178 return bpaddr;
5179
5180 /* We are setting a breakpoint in Thumb code that could potentially
5181 contain an IT block. The first step is to find how much Thumb
5182 code there is; we do not need to read outside of known Thumb
5183 sequences. */
5184 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5185 if (map_type == 0)
5186 /* Thumb-2 code must have mapping symbols to have a chance. */
5187 return bpaddr;
5188
5189 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5190
5191 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5192 && func_start > boundary)
5193 boundary = func_start;
5194
5195 /* Search for a candidate IT instruction. We have to do some fancy
5196 footwork to distinguish a real IT instruction from the second
5197 half of a 32-bit instruction, but there is no need for that if
5198 there's no candidate. */
5199 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5200 if (buf_len == 0)
5201 /* No room for an IT instruction. */
5202 return bpaddr;
5203
5204 buf = xmalloc (buf_len);
5205 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5206 return bpaddr;
5207 any = 0;
5208 for (i = 0; i < buf_len; i += 2)
5209 {
5210 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5211 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5212 {
5213 any = 1;
5214 break;
5215 }
5216 }
5217 if (any == 0)
5218 {
5219 xfree (buf);
5220 return bpaddr;
5221 }
5222
5223 /* OK, the code bytes before this instruction contain at least one
5224 halfword which resembles an IT instruction. We know that it's
5225 Thumb code, but there are still two possibilities. Either the
5226 halfword really is an IT instruction, or it is the second half of
5227 a 32-bit Thumb instruction. The only way we can tell is to
5228 scan forwards from a known instruction boundary. */
5229 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5230 {
5231 int definite;
5232
5233 /* There's a lot of code before this instruction. Start with an
5234 optimistic search; it's easy to recognize halfwords that can
5235 not be the start of a 32-bit instruction, and use that to
5236 lock on to the instruction boundaries. */
5237 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5238 if (buf == NULL)
5239 return bpaddr;
5240 buf_len = IT_SCAN_THRESHOLD;
5241
5242 definite = 0;
5243 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5244 {
5245 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5246 if (thumb_insn_size (inst1) == 2)
5247 {
5248 definite = 1;
5249 break;
5250 }
5251 }
5252
5253 /* At this point, if DEFINITE, BUF[I] is the first place we
5254 are sure that we know the instruction boundaries, and it is far
5255 enough from BPADDR that we could not miss an IT instruction
5256 affecting BPADDR. If ! DEFINITE, give up - start from a
5257 known boundary. */
5258 if (! definite)
5259 {
5260 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5261 bpaddr - boundary);
5262 if (buf == NULL)
5263 return bpaddr;
5264 buf_len = bpaddr - boundary;
5265 i = 0;
5266 }
5267 }
5268 else
5269 {
5270 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5271 if (buf == NULL)
5272 return bpaddr;
5273 buf_len = bpaddr - boundary;
5274 i = 0;
5275 }
5276
5277 /* Scan forwards. Find the last IT instruction before BPADDR. */
5278 last_it = -1;
5279 last_it_count = 0;
5280 while (i < buf_len)
5281 {
5282 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5283 last_it_count--;
5284 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5285 {
5286 last_it = i;
5287 if (inst1 & 0x0001)
5288 last_it_count = 4;
5289 else if (inst1 & 0x0002)
5290 last_it_count = 3;
5291 else if (inst1 & 0x0004)
5292 last_it_count = 2;
5293 else
5294 last_it_count = 1;
5295 }
5296 i += thumb_insn_size (inst1);
5297 }
5298
5299 xfree (buf);
5300
5301 if (last_it == -1)
5302 /* There wasn't really an IT instruction after all. */
5303 return bpaddr;
5304
5305 if (last_it_count < 1)
5306 /* It was too far away. */
5307 return bpaddr;
5308
5309 /* This really is a trouble spot. Move the breakpoint to the IT
5310 instruction. */
5311 return bpaddr - buf_len + last_it;
5312 }
5313
5314 /* ARM displaced stepping support.
5315
5316 Generally ARM displaced stepping works as follows:
5317
5318 1. When an instruction is to be single-stepped, it is first decoded by
5319 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5320 Depending on the type of instruction, it is then copied to a scratch
5321 location, possibly in a modified form. The copy_* set of functions
5322 performs such modification, as necessary. A breakpoint is placed after
5323 the modified instruction in the scratch space to return control to GDB.
5324 Note in particular that instructions which modify the PC will no longer
5325 do so after modification.
5326
5327 2. The instruction is single-stepped, by setting the PC to the scratch
5328 location address, and resuming. Control returns to GDB when the
5329 breakpoint is hit.
5330
5331 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5332 function used for the current instruction. This function's job is to
5333 put the CPU/memory state back to what it would have been if the
5334 instruction had been executed unmodified in its original location. */
5335
5336 /* NOP instruction (mov r0, r0). */
5337 #define ARM_NOP 0xe1a00000
5338 #define THUMB_NOP 0x4600
5339
5340 /* Helper for register reads for displaced stepping. In particular, this
5341 returns the PC as it would be seen by the instruction at its original
5342 location. */
5343
5344 ULONGEST
5345 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5346 int regno)
5347 {
5348 ULONGEST ret;
5349 CORE_ADDR from = dsc->insn_addr;
5350
5351 if (regno == ARM_PC_REGNUM)
5352 {
5353 /* Compute pipeline offset:
5354 - When executing an ARM instruction, PC reads as the address of the
5355 current instruction plus 8.
5356 - When executing a Thumb instruction, PC reads as the address of the
5357 current instruction plus 4. */
5358
5359 if (!dsc->is_thumb)
5360 from += 8;
5361 else
5362 from += 4;
5363
5364 if (debug_displaced)
5365 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5366 (unsigned long) from);
5367 return (ULONGEST) from;
5368 }
5369 else
5370 {
5371 regcache_cooked_read_unsigned (regs, regno, &ret);
5372 if (debug_displaced)
5373 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5374 regno, (unsigned long) ret);
5375 return ret;
5376 }
5377 }
5378
5379 static int
5380 displaced_in_arm_mode (struct regcache *regs)
5381 {
5382 ULONGEST ps;
5383 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5384
5385 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5386
5387 return (ps & t_bit) == 0;
5388 }
5389
5390 /* Write to the PC as from a branch instruction. */
5391
5392 static void
5393 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5394 ULONGEST val)
5395 {
5396 if (!dsc->is_thumb)
5397 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5398 architecture versions < 6. */
5399 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5400 val & ~(ULONGEST) 0x3);
5401 else
5402 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5403 val & ~(ULONGEST) 0x1);
5404 }
5405
5406 /* Write to the PC as from a branch-exchange instruction. */
5407
5408 static void
5409 bx_write_pc (struct regcache *regs, ULONGEST val)
5410 {
5411 ULONGEST ps;
5412 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5413
5414 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5415
5416 if ((val & 1) == 1)
5417 {
5418 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5419 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5420 }
5421 else if ((val & 2) == 0)
5422 {
5423 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5424 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5425 }
5426 else
5427 {
5428 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5429 mode, align dest to 4 bytes). */
5430 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5431 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5432 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5433 }
5434 }
5435
5436 /* Write to the PC as if from a load instruction. */
5437
5438 static void
5439 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5440 ULONGEST val)
5441 {
5442 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5443 bx_write_pc (regs, val);
5444 else
5445 branch_write_pc (regs, dsc, val);
5446 }
5447
5448 /* Write to the PC as if from an ALU instruction. */
5449
5450 static void
5451 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5452 ULONGEST val)
5453 {
5454 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5455 bx_write_pc (regs, val);
5456 else
5457 branch_write_pc (regs, dsc, val);
5458 }
5459
5460 /* Helper for writing to registers for displaced stepping. Writing to the PC
5461 has a varying effects depending on the instruction which does the write:
5462 this is controlled by the WRITE_PC argument. */
5463
5464 void
5465 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5466 int regno, ULONGEST val, enum pc_write_style write_pc)
5467 {
5468 if (regno == ARM_PC_REGNUM)
5469 {
5470 if (debug_displaced)
5471 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5472 (unsigned long) val);
5473 switch (write_pc)
5474 {
5475 case BRANCH_WRITE_PC:
5476 branch_write_pc (regs, dsc, val);
5477 break;
5478
5479 case BX_WRITE_PC:
5480 bx_write_pc (regs, val);
5481 break;
5482
5483 case LOAD_WRITE_PC:
5484 load_write_pc (regs, dsc, val);
5485 break;
5486
5487 case ALU_WRITE_PC:
5488 alu_write_pc (regs, dsc, val);
5489 break;
5490
5491 case CANNOT_WRITE_PC:
5492 warning (_("Instruction wrote to PC in an unexpected way when "
5493 "single-stepping"));
5494 break;
5495
5496 default:
5497 internal_error (__FILE__, __LINE__,
5498 _("Invalid argument to displaced_write_reg"));
5499 }
5500
5501 dsc->wrote_to_pc = 1;
5502 }
5503 else
5504 {
5505 if (debug_displaced)
5506 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5507 regno, (unsigned long) val);
5508 regcache_cooked_write_unsigned (regs, regno, val);
5509 }
5510 }
5511
/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function
   returns 1 if any of these fields in INSN reference the PC
   (also 0b1111, r15), else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t probe = 1;

  while (bitmask != 0)
    {
      uint32_t field_mask;

      /* Advance PROBE to the lowest bit still set in BITMASK.  */
      while (probe != 0 && (bitmask & probe) == 0)
	probe <<= 1;

      if (probe == 0)
	break;

      /* The four bits of the register field starting at PROBE.  */
      field_mask = probe * 0xf;

      /* All four bits set means the field holds r15 (the PC).  */
      if ((insn & field_mask) == field_mask)
	return 1;

      bitmask &= ~field_mask;
    }

  return 0;
}
5543
5544 /* The simplest copy function. Many instructions have the same effect no
5545 matter what address they are executed at: in those cases, use this. */
5546
5547 static int
5548 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5549 const char *iname, struct displaced_step_closure *dsc)
5550 {
5551 if (debug_displaced)
5552 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5553 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5554 iname);
5555
5556 dsc->modinsn[0] = insn;
5557
5558 return 0;
5559 }
5560
5561 static int
5562 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5563 uint16_t insn2, const char *iname,
5564 struct displaced_step_closure *dsc)
5565 {
5566 if (debug_displaced)
5567 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5568 "opcode/class '%s' unmodified\n", insn1, insn2,
5569 iname);
5570
5571 dsc->modinsn[0] = insn1;
5572 dsc->modinsn[1] = insn2;
5573 dsc->numinsns = 2;
5574
5575 return 0;
5576 }
5577
5578 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
5579 modification. */
5580 static int
5581 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5582 const char *iname,
5583 struct displaced_step_closure *dsc)
5584 {
5585 if (debug_displaced)
5586 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5587 "opcode/class '%s' unmodified\n", insn,
5588 iname);
5589
5590 dsc->modinsn[0] = insn;
5591
5592 return 0;
5593 }
5594
5595 /* Preload instructions with immediate offset. */
5596
5597 static void
5598 cleanup_preload (struct gdbarch *gdbarch,
5599 struct regcache *regs, struct displaced_step_closure *dsc)
5600 {
5601 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5602 if (!dsc->u.preload.immed)
5603 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5604 }
5605
5606 static void
5607 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5608 struct displaced_step_closure *dsc, unsigned int rn)
5609 {
5610 ULONGEST rn_val;
5611 /* Preload instructions:
5612
5613 {pli/pld} [rn, #+/-imm]
5614 ->
5615 {pli/pld} [r0, #+/-imm]. */
5616
5617 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5618 rn_val = displaced_read_reg (regs, dsc, rn);
5619 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5620 dsc->u.preload.immed = 1;
5621
5622 dsc->cleanup = &cleanup_preload;
5623 }
5624
5625 static int
5626 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5627 struct displaced_step_closure *dsc)
5628 {
5629 unsigned int rn = bits (insn, 16, 19);
5630
5631 if (!insn_references_pc (insn, 0x000f0000ul))
5632 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5633
5634 if (debug_displaced)
5635 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5636 (unsigned long) insn);
5637
5638 dsc->modinsn[0] = insn & 0xfff0ffff;
5639
5640 install_preload (gdbarch, regs, dsc, rn);
5641
5642 return 0;
5643 }
5644
5645 static int
5646 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
5647 struct regcache *regs, struct displaced_step_closure *dsc)
5648 {
5649 unsigned int rn = bits (insn1, 0, 3);
5650 unsigned int u_bit = bit (insn1, 7);
5651 int imm12 = bits (insn2, 0, 11);
5652 ULONGEST pc_val;
5653
5654 if (rn != ARM_PC_REGNUM)
5655 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
5656
5657 /* PC is only allowed to use in PLI (immediate,literal) Encoding T3, and
5658 PLD (literal) Encoding T1. */
5659 if (debug_displaced)
5660 fprintf_unfiltered (gdb_stdlog,
5661 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5662 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
5663 imm12);
5664
5665 if (!u_bit)
5666 imm12 = -1 * imm12;
5667
5668 /* Rewrite instruction {pli/pld} PC imm12 into:
5669 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5670
5671 {pli/pld} [r0, r1]
5672
5673 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5674
5675 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5676 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5677
5678 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5679
5680 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
5681 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
5682 dsc->u.preload.immed = 0;
5683
5684 /* {pli/pld} [r0, r1] */
5685 dsc->modinsn[0] = insn1 & 0xfff0;
5686 dsc->modinsn[1] = 0xf001;
5687 dsc->numinsns = 2;
5688
5689 dsc->cleanup = &cleanup_preload;
5690 return 0;
5691 }
5692
5693 /* Preload instructions with register offset. */
5694
5695 static void
5696 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5697 struct displaced_step_closure *dsc, unsigned int rn,
5698 unsigned int rm)
5699 {
5700 ULONGEST rn_val, rm_val;
5701
5702 /* Preload register-offset instructions:
5703
5704 {pli/pld} [rn, rm {, shift}]
5705 ->
5706 {pli/pld} [r0, r1 {, shift}]. */
5707
5708 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5709 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5710 rn_val = displaced_read_reg (regs, dsc, rn);
5711 rm_val = displaced_read_reg (regs, dsc, rm);
5712 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5713 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5714 dsc->u.preload.immed = 0;
5715
5716 dsc->cleanup = &cleanup_preload;
5717 }
5718
5719 static int
5720 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5721 struct regcache *regs,
5722 struct displaced_step_closure *dsc)
5723 {
5724 unsigned int rn = bits (insn, 16, 19);
5725 unsigned int rm = bits (insn, 0, 3);
5726
5727
5728 if (!insn_references_pc (insn, 0x000f000ful))
5729 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5730
5731 if (debug_displaced)
5732 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5733 (unsigned long) insn);
5734
5735 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5736
5737 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5738 return 0;
5739 }
5740
5741 /* Copy/cleanup coprocessor load and store instructions. */
5742
5743 static void
5744 cleanup_copro_load_store (struct gdbarch *gdbarch,
5745 struct regcache *regs,
5746 struct displaced_step_closure *dsc)
5747 {
5748 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5749
5750 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5751
5752 if (dsc->u.ldst.writeback)
5753 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5754 }
5755
5756 static void
5757 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5758 struct displaced_step_closure *dsc,
5759 int writeback, unsigned int rn)
5760 {
5761 ULONGEST rn_val;
5762
5763 /* Coprocessor load/store instructions:
5764
5765 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5766 ->
5767 {stc/stc2} [r0, #+/-imm].
5768
5769 ldc/ldc2 are handled identically. */
5770
5771 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5772 rn_val = displaced_read_reg (regs, dsc, rn);
5773 /* PC should be 4-byte aligned. */
5774 rn_val = rn_val & 0xfffffffc;
5775 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5776
5777 dsc->u.ldst.writeback = writeback;
5778 dsc->u.ldst.rn = rn;
5779
5780 dsc->cleanup = &cleanup_copro_load_store;
5781 }
5782
5783 static int
5784 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5785 struct regcache *regs,
5786 struct displaced_step_closure *dsc)
5787 {
5788 unsigned int rn = bits (insn, 16, 19);
5789
5790 if (!insn_references_pc (insn, 0x000f0000ul))
5791 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5792
5793 if (debug_displaced)
5794 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5795 "load/store insn %.8lx\n", (unsigned long) insn);
5796
5797 dsc->modinsn[0] = insn & 0xfff0ffff;
5798
5799 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5800
5801 return 0;
5802 }
5803
5804 static int
5805 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5806 uint16_t insn2, struct regcache *regs,
5807 struct displaced_step_closure *dsc)
5808 {
5809 unsigned int rn = bits (insn1, 0, 3);
5810
5811 if (rn != ARM_PC_REGNUM)
5812 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5813 "copro load/store", dsc);
5814
5815 if (debug_displaced)
5816 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5817 "load/store insn %.4x%.4x\n", insn1, insn2);
5818
5819 dsc->modinsn[0] = insn1 & 0xfff0;
5820 dsc->modinsn[1] = insn2;
5821 dsc->numinsns = 2;
5822
5823 /* This function is called for copying instruction LDC/LDC2/VLDR, which
5824 doesn't support writeback, so pass 0. */
5825 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5826
5827 return 0;
5828 }
5829
5830 /* Clean up branch instructions (actually perform the branch, by setting
5831 PC). */
5832
5833 static void
5834 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5835 struct displaced_step_closure *dsc)
5836 {
5837 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5838 int branch_taken = condition_true (dsc->u.branch.cond, status);
5839 enum pc_write_style write_pc = dsc->u.branch.exchange
5840 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5841
5842 if (!branch_taken)
5843 return;
5844
5845 if (dsc->u.branch.link)
5846 {
5847 /* The value of LR should be the next insn of current one. In order
5848 not to confuse logic hanlding later insn `bx lr', if current insn mode
5849 is Thumb, the bit 0 of LR value should be set to 1. */
5850 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5851
5852 if (dsc->is_thumb)
5853 next_insn_addr |= 0x1;
5854
5855 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5856 CANNOT_WRITE_PC);
5857 }
5858
5859 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5860 }
5861
5862 /* Copy B/BL/BLX instructions with immediate destinations. */
5863
5864 static void
5865 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5866 struct displaced_step_closure *dsc,
5867 unsigned int cond, int exchange, int link, long offset)
5868 {
5869 /* Implement "BL<cond> <label>" as:
5870
5871 Preparation: cond <- instruction condition
5872 Insn: mov r0, r0 (nop)
5873 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5874
5875 B<cond> similar, but don't set r14 in cleanup. */
5876
5877 dsc->u.branch.cond = cond;
5878 dsc->u.branch.link = link;
5879 dsc->u.branch.exchange = exchange;
5880
5881 dsc->u.branch.dest = dsc->insn_addr;
5882 if (link && exchange)
5883 /* For BLX, offset is computed from the Align (PC, 4). */
5884 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
5885
5886 if (dsc->is_thumb)
5887 dsc->u.branch.dest += 4 + offset;
5888 else
5889 dsc->u.branch.dest += 8 + offset;
5890
5891 dsc->cleanup = &cleanup_branch;
5892 }
5893 static int
5894 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
5895 struct regcache *regs, struct displaced_step_closure *dsc)
5896 {
5897 unsigned int cond = bits (insn, 28, 31);
5898 int exchange = (cond == 0xf);
5899 int link = exchange || bit (insn, 24);
5900 long offset;
5901
5902 if (debug_displaced)
5903 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
5904 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
5905 (unsigned long) insn);
5906 if (exchange)
5907 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
5908 then arrange the switch into Thumb mode. */
5909 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
5910 else
5911 offset = bits (insn, 0, 23) << 2;
5912
5913 if (bit (offset, 25))
5914 offset = offset | ~0x3ffffff;
5915
5916 dsc->modinsn[0] = ARM_NOP;
5917
5918 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
5919 return 0;
5920 }
5921
/* Copy 32-bit Thumb B/BL/BLX (immediate) instructions for displaced
   stepping.  Decode the condition and branch offset from INSN1/INSN2,
   replace the instruction with a NOP in the scratch area, and let
   cleanup_branch (via install_b_bl_blx) perform the branch.  */

static int
thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
		      uint16_t insn2, struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  int link = bit (insn2, 14);
  int exchange = link && !bit (insn2, 12);
  int cond = INST_AL;
  long offset = 0;
  int j1 = bit (insn2, 13);
  int j2 = bit (insn2, 11);
  /* S is the sign bit of the immediate (insn1 bit 10), sign-extended
     to an int by sbits.  */
  int s = sbits (insn1, 10, 10);
  /* Per the Thumb-2 branch encodings: I1 = NOT (J1 XOR S),
     I2 = NOT (J2 XOR S).  */
  int i1 = !(j1 ^ bit (insn1, 10));
  int i2 = !(j2 ^ bit (insn1, 10));

  if (!link && !exchange) /* B */
    {
      offset = (bits (insn2, 0, 10) << 1);
      if (bit (insn2, 12)) /* Encoding T4 */
	{
	  /* Unconditional branch: 24-bit offset assembled from
	     imm10 (insn1), I1, I2 and S.  */
	  offset |= (bits (insn1, 0, 9) << 12)
	    | (i2 << 22)
	    | (i1 << 23)
	    | (s << 24);
	  cond = INST_AL;
	}
      else /* Encoding T3 */
	{
	  /* Conditional branch: smaller immediate, J1/J2 used
	     directly, condition in insn1 bits 6-9.  */
	  offset |= (bits (insn1, 0, 5) << 12)
	    | (j1 << 18)
	    | (j2 << 19)
	    | (s << 20);
	  cond = bits (insn1, 6, 9);
	}
    }
  else
    {
      /* BL/BLX: 24-bit offset from imm10, I1, I2 and S.  For BLX the
	 low halfword contributes bits 1-10 scaled by 4; for BL,
	 bits 0-10 scaled by 2.  */
      offset = (bits (insn1, 0, 9) << 12);
      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
      offset |= exchange ?
	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
			"%.4x %.4x with offset %.8lx\n",
			link ? (exchange) ? "blx" : "bl" : "b",
			insn1, insn2, offset);

  dsc->modinsn[0] = THUMB_NOP;

  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
  return 0;
}
5976
5977 /* Copy B Thumb instructions. */
5978 static int
5979 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
5980 struct displaced_step_closure *dsc)
5981 {
5982 unsigned int cond = 0;
5983 int offset = 0;
5984 unsigned short bit_12_15 = bits (insn, 12, 15);
5985 CORE_ADDR from = dsc->insn_addr;
5986
5987 if (bit_12_15 == 0xd)
5988 {
5989 /* offset = SignExtend (imm8:0, 32) */
5990 offset = sbits ((insn << 1), 0, 8);
5991 cond = bits (insn, 8, 11);
5992 }
5993 else if (bit_12_15 == 0xe) /* Encoding T2 */
5994 {
5995 offset = sbits ((insn << 1), 0, 11);
5996 cond = INST_AL;
5997 }
5998
5999 if (debug_displaced)
6000 fprintf_unfiltered (gdb_stdlog,
6001 "displaced: copying b immediate insn %.4x "
6002 "with offset %d\n", insn, offset);
6003
6004 dsc->u.branch.cond = cond;
6005 dsc->u.branch.link = 0;
6006 dsc->u.branch.exchange = 0;
6007 dsc->u.branch.dest = from + 4 + offset;
6008
6009 dsc->modinsn[0] = THUMB_NOP;
6010
6011 dsc->cleanup = &cleanup_branch;
6012
6013 return 0;
6014 }
6015
6016 /* Copy BX/BLX with register-specified destinations. */
6017
6018 static void
6019 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6020 struct displaced_step_closure *dsc, int link,
6021 unsigned int cond, unsigned int rm)
6022 {
6023 /* Implement {BX,BLX}<cond> <reg>" as:
6024
6025 Preparation: cond <- instruction condition
6026 Insn: mov r0, r0 (nop)
6027 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6028
6029 Don't set r14 in cleanup for BX. */
6030
6031 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6032
6033 dsc->u.branch.cond = cond;
6034 dsc->u.branch.link = link;
6035
6036 dsc->u.branch.exchange = 1;
6037
6038 dsc->cleanup = &cleanup_branch;
6039 }
6040
6041 static int
6042 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6043 struct regcache *regs, struct displaced_step_closure *dsc)
6044 {
6045 unsigned int cond = bits (insn, 28, 31);
6046 /* BX: x12xxx1x
6047 BLX: x12xxx3x. */
6048 int link = bit (insn, 5);
6049 unsigned int rm = bits (insn, 0, 3);
6050
6051 if (debug_displaced)
6052 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6053 (unsigned long) insn);
6054
6055 dsc->modinsn[0] = ARM_NOP;
6056
6057 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6058 return 0;
6059 }
6060
6061 static int
6062 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6063 struct regcache *regs,
6064 struct displaced_step_closure *dsc)
6065 {
6066 int link = bit (insn, 7);
6067 unsigned int rm = bits (insn, 3, 6);
6068
6069 if (debug_displaced)
6070 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6071 (unsigned short) insn);
6072
6073 dsc->modinsn[0] = THUMB_NOP;
6074
6075 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6076
6077 return 0;
6078 }
6079
6080
6081 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6082
6083 static void
6084 cleanup_alu_imm (struct gdbarch *gdbarch,
6085 struct regcache *regs, struct displaced_step_closure *dsc)
6086 {
6087 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6088 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6089 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6090 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6091 }
6092
6093 static int
6094 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6095 struct displaced_step_closure *dsc)
6096 {
6097 unsigned int rn = bits (insn, 16, 19);
6098 unsigned int rd = bits (insn, 12, 15);
6099 unsigned int op = bits (insn, 21, 24);
6100 int is_mov = (op == 0xd);
6101 ULONGEST rd_val, rn_val;
6102
6103 if (!insn_references_pc (insn, 0x000ff000ul))
6104 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
6105
6106 if (debug_displaced)
6107 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
6108 "%.8lx\n", is_mov ? "move" : "ALU",
6109 (unsigned long) insn);
6110
6111 /* Instruction is of form:
6112
6113 <op><cond> rd, [rn,] #imm
6114
6115 Rewrite as:
6116
6117 Preparation: tmp1, tmp2 <- r0, r1;
6118 r0, r1 <- rd, rn
6119 Insn: <op><cond> r0, r1, #imm
6120 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6121 */
6122
6123 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6124 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6125 rn_val = displaced_read_reg (regs, dsc, rn);
6126 rd_val = displaced_read_reg (regs, dsc, rd);
6127 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6128 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6129 dsc->rd = rd;
6130
6131 if (is_mov)
6132 dsc->modinsn[0] = insn & 0xfff00fff;
6133 else
6134 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
6135
6136 dsc->cleanup = &cleanup_alu_imm;
6137
6138 return 0;
6139 }
6140
6141 static int
6142 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
6143 uint16_t insn2, struct regcache *regs,
6144 struct displaced_step_closure *dsc)
6145 {
6146 unsigned int op = bits (insn1, 5, 8);
6147 unsigned int rn, rm, rd;
6148 ULONGEST rd_val, rn_val;
6149
6150 rn = bits (insn1, 0, 3); /* Rn */
6151 rm = bits (insn2, 0, 3); /* Rm */
6152 rd = bits (insn2, 8, 11); /* Rd */
6153
6154 /* This routine is only called for instruction MOV. */
6155 gdb_assert (op == 0x2 && rn == 0xf);
6156
6157 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
6158 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
6159
6160 if (debug_displaced)
6161 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
6162 "ALU", insn1, insn2);
6163
6164 /* Instruction is of form:
6165
6166 <op><cond> rd, [rn,] #imm
6167
6168 Rewrite as:
6169
6170 Preparation: tmp1, tmp2 <- r0, r1;
6171 r0, r1 <- rd, rn
6172 Insn: <op><cond> r0, r1, #imm
6173 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6174 */
6175
6176 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6177 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6178 rn_val = displaced_read_reg (regs, dsc, rn);
6179 rd_val = displaced_read_reg (regs, dsc, rd);
6180 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6181 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6182 dsc->rd = rd;
6183
6184 dsc->modinsn[0] = insn1;
6185 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
6186 dsc->numinsns = 2;
6187
6188 dsc->cleanup = &cleanup_alu_imm;
6189
6190 return 0;
6191 }
6192
6193 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6194
6195 static void
6196 cleanup_alu_reg (struct gdbarch *gdbarch,
6197 struct regcache *regs, struct displaced_step_closure *dsc)
6198 {
6199 ULONGEST rd_val;
6200 int i;
6201
6202 rd_val = displaced_read_reg (regs, dsc, 0);
6203
6204 for (i = 0; i < 3; i++)
6205 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6206
6207 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6208 }
6209
/* Prepare to single-step an ALU instruction with a register RHS whose
   operands RD/RN/RM may include the PC: save r0-r2, load the operand
   values into them, and register the cleanup that moves the result from
   r0 back into RD.  */

static void
install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
		 struct displaced_step_closure *dsc,
		 unsigned int rd, unsigned int rn, unsigned int rm)
{
  ULONGEST rd_val, rn_val, rm_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
		  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, r1, r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  /* Save the scratch registers first; displaced_read_reg resolves the PC
     to the original (non-displaced) execution address.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  dsc->cleanup = &cleanup_alu_reg;
}
6242
/* Copy an ARM ALU instruction with register RHS, rewriting any use of
   the PC in Rd/Rn/Rm onto scratch registers r0-r2.  */

static int
arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);

  /* Mask 0x000ff00f covers the Rd, Rn and Rm fields: if none of them
     names the PC, run the insn out of line unchanged.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
			is_mov ? "move" : "ALU", (unsigned long) insn);

  /* Substitute r0 for Rd and r2 for Rm; non-MOV forms additionally get
     r1 substituted for Rn (the 0x10000 bit).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
		   bits (insn, 0, 3));
  return 0;
}
6266
6267 static int
6268 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6269 struct regcache *regs,
6270 struct displaced_step_closure *dsc)
6271 {
6272 unsigned rn, rm, rd;
6273
6274 rd = bits (insn, 3, 6);
6275 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6276 rm = 2;
6277
6278 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6279 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6280
6281 if (debug_displaced)
6282 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6283 "ALU", (unsigned short) insn);
6284
6285 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6286
6287 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6288
6289 return 0;
6290 }
6291
6292 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6293
6294 static void
6295 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6296 struct regcache *regs,
6297 struct displaced_step_closure *dsc)
6298 {
6299 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6300 int i;
6301
6302 for (i = 0; i < 4; i++)
6303 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6304
6305 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6306 }
6307
/* Prepare to single-step an ALU instruction with a shifted-register RHS
   (operands RD/RN/RM/RS, any of which may be the PC): save r0-r3, load
   the operand values into them, and register the cleanup that moves the
   result from r0 back into RD.  */

static void
install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
			 struct displaced_step_closure *dsc,
			 unsigned int rd, unsigned int rn, unsigned int rm,
			 unsigned rs)
{
  int i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
		  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
	      r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
	      rd <- tmp5
  */

  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;
  dsc->cleanup = &cleanup_alu_shifted_reg;
}
6345
/* Copy an ARM ALU instruction with shifted-register RHS, rewriting any
   use of the PC in Rd/Rn/Rm/Rs onto scratch registers r0-r3.  */

static int
arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  unsigned int rd, rn, rm, rs;

  /* Mask 0x000fff0f covers the Rd, Rn, Rs and Rm fields.  */
  if (!insn_references_pc (insn, 0x000fff0ful))
    return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
			"%.8lx\n", is_mov ? "move" : "ALU",
			(unsigned long) insn);

  rn = bits (insn, 16, 19);
  rm = bits (insn, 0, 3);
  rs = bits (insn, 8, 11);
  rd = bits (insn, 12, 15);

  /* Substitute r0 for Rd, r2 for Rm, r3 for Rs; non-MOV forms also get
     r1 for Rn (the 0x10000 bit).  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);

  return 0;
}
6377
6378 /* Clean up load instructions. */
6379
static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
	      struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* The loaded value (pair for 8-byte transfers) is in r0/r1, and the
     possibly-updated base address is in r2.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved by the copy routine; r3 was only
     used (and saved) for register-offset addressing.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}
6406
6407 /* Clean up store instructions. */
6408
static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
	       struct displaced_step_closure *dsc)
{
  /* r2 holds the (possibly updated) base address.  */
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers saved by the copy routine; r3 was only
     saved for register-offset addressing, and r4 only when the copy
     needed it for the PC-store fixup sequence.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}
6428
6429 /* Copy "extra" load/store instructions. These are halfword/doubleword
6430 transfers, which have a different encoding to byte/word transfers. */
6431
static int
arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged,
		      struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 24);
  unsigned int op2 = bits (insn, 5, 6);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  /* Per-opcode tables: whether the insn loads, and the transfer size in
     bytes (8 means a doubleword using rt and rt+1).  */
  char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
  char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
  int immed = (op1 & 0x4) != 0;
  int opcode;
  ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;

  /* Mask 0x000ff00f covers the Rt, Rn and Rm fields.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
			"insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
			(unsigned long) insn);

  /* Collapse op1/op2 into an index into the tables above.  */
  opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;

  if (opcode < 0)
    internal_error (__FILE__, __LINE__,
		    _("copy_extra_ld_st: instruction decode error"));

  /* Save the scratch registers we are about to clobber; r3 is only
     needed for register-offset forms.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  /* Load the operand values into the scratch registers.  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  if (bytesize[opcode] == 8)
    rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  if (bytesize[opcode] == 8)
    displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  /* Record what the cleanup routine needs to undo/finish the transfer.  */
  dsc->rd = rt;
  dsc->u.ldst.xfersize = bytesize[opcode];
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, #imm].  */
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
  else
    /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
       ->
       {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3].  */
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;

  dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;

  return 0;
}
6503
6504 /* Copy byte/half word/word loads and stores. */
6505
/* Prepare to single-step a byte/halfword/word load or store whose
   operands may reference the PC: save the scratch registers, move the
   operand values into r0/r2 (and r3 for register-offset forms), and
   record the state cleanup_load/cleanup_store need.
   NOTE(review): dsc->tmp[1] is never saved here although cleanup_load
   restores it for xfersize > 4; callers only pass SIZE of 1 or 4, so
   that path should be unreachable from here -- confirm.  */

static void
install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
		    struct displaced_step_closure *dsc, int load,
		    int immed, int writeback, int size, int usermode,
		    int rt, int rm, int rn)
{
  ULONGEST rt_val, rn_val, rm_val = 0;

  /* Save scratch registers; r4 is only clobbered (by the PC-store fixup
     sequence) for stores.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rt;
  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = writeback;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from dispalced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
					= addr(Insn1) + offset - addr(Insn3) - 8
					= offset - 16
     Insn4: add r4, r4, #8 r4 = offset - 8
     Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
				= from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;
}
6559
6560
/* Copy a Thumb-2 PC-relative (literal-pool) load: LDR Rt, [PC, #+/-imm12].
   Rewritten as a register-offset load through scratch registers, with the
   aligned PC value in r2 and the (signed) offset in r3.  */

static int
thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc, int size)
{
  unsigned int u_bit = bit (insn1, 7);
  unsigned int rt = bits (insn2, 12, 15);
  int imm12 = bits (insn2, 0, 11);
  ULONGEST pc_val;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
			(unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
			imm12);

  /* The U bit selects whether the offset is added or subtracted.  */
  if (!u_bit)
    imm12 = -1 * imm12;

  /* Rewrite instruction LDR Rt imm12 into:

     Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3, r2 <- pc, r3 <- imm12

     LDR R0, R2, R3,

     Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3].  */


  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);

  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);

  /* Literal loads use the word-aligned PC as the base.  */
  pc_val = pc_val & 0xfffffffc;

  displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);

  dsc->rd = rt;

  dsc->u.ldst.xfersize = size;
  dsc->u.ldst.immed = 0;
  dsc->u.ldst.writeback = 0;
  dsc->u.ldst.restore_r4 = 0;

  /* LDR R0, R2, R3 */
  dsc->modinsn[0] = 0xf852;
  dsc->modinsn[1] = 0x3;
  dsc->numinsns = 2;

  dsc->cleanup = &cleanup_load;

  return 0;
}
6616
/* Copy a Thumb-2 load (register or immediate offset) that references the
   PC in Rt or Rn, redirecting it onto scratch registers r0/r2/r3.  */

static int
thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc,
			  int writeback, int immed)
{
  unsigned int rt = bits (insn2, 12, 15);
  unsigned int rn = bits (insn1, 0, 3);
  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
  /* In LDR (register), there is also a register Rm, which is not allowed to
     be PC, so we don't have to check it.  */

  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
					dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
			rt, rn, insn1, insn2);

  install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
		      0, rt, rm, rn);

  dsc->u.ldst.restore_r4 = 0;

  if (immed)
    /* ldr[b]<cond> rt, [rn, #imm], etc.
       ->
       ldr[b]<cond> r0, [r2, #imm].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = insn2 & 0x0fff;
    }
  else
    /* ldr[b]<cond> rt, [rn, rm], etc.
       ->
       ldr[b]<cond> r0, [r2, r3].  */
    {
      dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
      dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
    }

  dsc->numinsns = 2;

  return 0;
}
6664
6665
/* Copy an ARM byte/word load or store that references the PC, redirecting
   operands onto scratch registers.  A store of the PC additionally needs
   the multi-insn fixup sequence described in install_load_store to
   compute the correct stored value.  */

static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
			    struct regcache *regs,
			    struct displaced_step_closure *dsc,
			    int load, int size, int usermode)
{
  int immed = !bit (insn, 25);
  int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
  unsigned int rt = bits (insn, 12, 15);
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);  /* Only valid if !immed.  */

  /* Mask 0x000ff00f covers the Rt, Rn and Rm fields.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying %s%s r%d [r%d] insn %.8lx\n",
			load ? (size == 1 ? "ldrb" : "ldr")
			     : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
			rt, rn,
			(unsigned long) insn);

  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
		      usermode, rt, rm, rn);

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
	dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
	   ->
	   {ldr,str}[b]<cond> r0, [r2, r3].  */
	dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
	dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
	dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}
6730
6731 /* Cleanup LDM instructions with fully-populated register list. This is an
6732 unfortunate corner case: it's impossible to implement correctly by modifying
6733 the instruction. The issue is as follows: we have an instruction,
6734
6735 ldm rN, {r0-r15}
6736
6737 which we must rewrite to avoid loading PC. A possible solution would be to
6738 do the load in two halves, something like (with suitable cleanup
6739 afterwards):
6740
6741 mov r8, rN
6742 ldm[id][ab] r8!, {r0-r7}
6743 str r7, <temp>
6744 ldm[id][ab] r8, {r7-r14}
6745 <bkpt>
6746
6747 but at present there's no suitable place for <temp>, since the scratch space
6748 is overwritten before the cleanup routine is called. For now, we simply
6749 emulate the instruction. */
6750
static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  /* Pre-increment/decrement adjusts the address before each transfer;
     post- adjusts it afterwards.  */
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  /* Walk registers in ascending order for increment, descending for
     decrement, to match the memory order of the transfer.  */
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
			 && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
			"%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
			dsc->u.block.increment ? "inc" : "dec",
			dsc->u.block.before ? "before" : "after");

  /* Emulate the load one register at a time.  */
  while (regmask)
    {
      uint32_t memword;

      /* Advance to the next register named in the mask.  */
      if (inc)
	while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
	  regno++;
      else
	while (regno >= 0 && (regmask & (1 << regno)) == 0)
	  regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  /* Emulate base-register writeback.  */
  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
			 CANNOT_WRITE_PC);
}
6809
6810 /* Clean up an STM which included the PC in the register list. */
6811
static void
cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int store_executed = condition_true (dsc->u.block.cond, status);
  CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
  CORE_ADDR stm_insn_addr;
  uint32_t pc_val;
  long offset;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* If condition code fails, there's nothing else to do.  */
  if (!store_executed)
    return;

  /* Locate where the PC (always the highest-numbered register, hence the
     last one stored) landed in memory.  */
  if (dsc->u.block.increment)
    {
      pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;

      if (dsc->u.block.before)
	pc_stored_at += 4;
    }
  else
    {
      pc_stored_at = dsc->u.block.xfer_addr;

      if (dsc->u.block.before)
	pc_stored_at -= 4;
    }

  /* The copy ran from the scratch buffer, so the stored "PC" is relative
     to the scratch base; the difference is the architecture-dependent
     store offset (PC+8 or PC+12).  */
  pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
  stm_insn_addr = dsc->scratch_base;
  offset = pc_val - stm_insn_addr;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
			"STM instruction\n", offset);

  /* Rewrite the stored PC to the proper value for the non-displaced original
     instruction.  */
  write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
				 dsc->insn_addr + offset);
}
6856
6857 /* Clean up an LDM which includes the PC in the register list. We clumped all
6858 the registers in the transferred list into a contiguous range r0...rX (to
6859 avoid loading PC directly and losing control of the debugged program), so we
6860 must undo that here. */
6861
static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int load_executed = condition_true (dsc->u.block.cond, status);
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  /* The modified insn loaded into the contiguous range r0..r(N-1); all of
     those were clobbered.  */
  clobbered = (1 << num_to_shuffle) - 1;

  /* Walk the original register list from the top down, moving each loaded
     value from its temporary slot r(num_to_shuffle-1) into its real
     destination register.  Going high-to-low means each destination is
     either outside the clobbered range or has already been consumed.  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}
6938
6939 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
6940 in user-level code (in particular exception return, ldm rn, {...pc}^). */
6941
6942 static int
6943 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
6944 struct regcache *regs,
6945 struct displaced_step_closure *dsc)
6946 {
6947 int load = bit (insn, 20);
6948 int user = bit (insn, 22);
6949 int increment = bit (insn, 23);
6950 int before = bit (insn, 24);
6951 int writeback = bit (insn, 21);
6952 int rn = bits (insn, 16, 19);
6953
6954 /* Block transfers which don't mention PC can be run directly
6955 out-of-line. */
6956 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
6957 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
6958
6959 if (rn == ARM_PC_REGNUM)
6960 {
6961 warning (_("displaced: Unpredictable LDM or STM with "
6962 "base register r15"));
6963 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
6964 }
6965
6966 if (debug_displaced)
6967 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
6968 "%.8lx\n", (unsigned long) insn);
6969
6970 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
6971 dsc->u.block.rn = rn;
6972
6973 dsc->u.block.load = load;
6974 dsc->u.block.user = user;
6975 dsc->u.block.increment = increment;
6976 dsc->u.block.before = before;
6977 dsc->u.block.writeback = writeback;
6978 dsc->u.block.cond = bits (insn, 28, 31);
6979
6980 dsc->u.block.regmask = insn & 0xffff;
6981
6982 if (load)
6983 {
6984 if ((insn & 0xffff) == 0xffff)
6985 {
6986 /* LDM with a fully-populated register list. This case is
6987 particularly tricky. Implement for now by fully emulating the
6988 instruction (which might not behave perfectly in all cases, but
6989 these instructions should be rare enough for that not to matter
6990 too much). */
6991 dsc->modinsn[0] = ARM_NOP;
6992
6993 dsc->cleanup = &cleanup_block_load_all;
6994 }
6995 else
6996 {
6997 /* LDM of a list of registers which includes PC. Implement by
6998 rewriting the list of registers to be transferred into a
6999 contiguous chunk r0...rX before doing the transfer, then shuffling
7000 registers into the correct places in the cleanup routine. */
7001 unsigned int regmask = insn & 0xffff;
7002 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7003 unsigned int to = 0, from = 0, i, new_rn;
7004
7005 for (i = 0; i < num_in_list; i++)
7006 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7007
7008 /* Writeback makes things complicated. We need to avoid clobbering
7009 the base register with one of the registers in our modified
7010 register list, but just using a different register can't work in
7011 all cases, e.g.:
7012
7013 ldm r14!, {r0-r13,pc}
7014
7015 which would need to be rewritten as:
7016
7017 ldm rN!, {r0-r14}
7018
7019 but that can't work, because there's no free register for N.
7020
7021 Solve this by turning off the writeback bit, and emulating
7022 writeback manually in the cleanup routine. */
7023
7024 if (writeback)
7025 insn &= ~(1 << 21);
7026
7027 new_regmask = (1 << num_in_list) - 1;
7028
7029 if (debug_displaced)
7030 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7031 "{..., pc}: original reg list %.4x, modified "
7032 "list %.4x\n"), rn, writeback ? "!" : "",
7033 (int) insn & 0xffff, new_regmask);
7034
7035 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
7036
7037 dsc->cleanup = &cleanup_block_load_pc;
7038 }
7039 }
7040 else
7041 {
7042 /* STM of a list of registers which includes PC. Run the instruction
7043 as-is, but out of line: this will store the wrong value for the PC,
7044 so we must manually fix up the memory in the cleanup routine.
7045 Doing things this way has the advantage that we can auto-detect
7046 the offset of the PC write (which is architecture-dependent) in
7047 the cleanup routine. */
7048 dsc->modinsn[0] = insn;
7049
7050 dsc->cleanup = &cleanup_block_store_pc;
7051 }
7052
7053 return 0;
7054 }
7055
7056 static int
7057 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7058 struct regcache *regs,
7059 struct displaced_step_closure *dsc)
7060 {
7061 int rn = bits (insn1, 0, 3);
7062 int load = bit (insn1, 4);
7063 int writeback = bit (insn1, 5);
7064
7065 /* Block transfers which don't mention PC can be run directly
7066 out-of-line. */
7067 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
7068 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
7069
7070 if (rn == ARM_PC_REGNUM)
7071 {
7072 warning (_("displaced: Unpredictable LDM or STM with "
7073 "base register r15"));
7074 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7075 "unpredictable ldm/stm", dsc);
7076 }
7077
7078 if (debug_displaced)
7079 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7080 "%.4x%.4x\n", insn1, insn2);
7081
7082 /* Clear bit 13, since it should be always zero. */
7083 dsc->u.block.regmask = (insn2 & 0xdfff);
7084 dsc->u.block.rn = rn;
7085
7086 dsc->u.block.load = load;
7087 dsc->u.block.user = 0;
7088 dsc->u.block.increment = bit (insn1, 7);
7089 dsc->u.block.before = bit (insn1, 8);
7090 dsc->u.block.writeback = writeback;
7091 dsc->u.block.cond = INST_AL;
7092 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7093
7094 if (load)
7095 {
7096 if (dsc->u.block.regmask == 0xffff)
7097 {
7098 /* This branch is impossible to happen. */
7099 gdb_assert (0);
7100 }
7101 else
7102 {
7103 unsigned int regmask = dsc->u.block.regmask;
7104 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7105 unsigned int to = 0, from = 0, i, new_rn;
7106
7107 for (i = 0; i < num_in_list; i++)
7108 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7109
7110 if (writeback)
7111 insn1 &= ~(1 << 5);
7112
7113 new_regmask = (1 << num_in_list) - 1;
7114
7115 if (debug_displaced)
7116 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7117 "{..., pc}: original reg list %.4x, modified "
7118 "list %.4x\n"), rn, writeback ? "!" : "",
7119 (int) dsc->u.block.regmask, new_regmask);
7120
7121 dsc->modinsn[0] = insn1;
7122 dsc->modinsn[1] = (new_regmask & 0xffff);
7123 dsc->numinsns = 2;
7124
7125 dsc->cleanup = &cleanup_block_load_pc;
7126 }
7127 }
7128 else
7129 {
7130 dsc->modinsn[0] = insn1;
7131 dsc->modinsn[1] = insn2;
7132 dsc->numinsns = 2;
7133 dsc->cleanup = &cleanup_block_store_pc;
7134 }
7135 return 0;
7136 }
7137
7138 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7139 for Linux, where some SVC instructions must be treated specially. */
7140
7141 static void
7142 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7143 struct displaced_step_closure *dsc)
7144 {
7145 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7146
7147 if (debug_displaced)
7148 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7149 "%.8lx\n", (unsigned long) resume_addr);
7150
7151 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7152 }
7153
7154
7155 /* Common copy routine for svc instruciton. */
7156
7157 static int
7158 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7159 struct displaced_step_closure *dsc)
7160 {
7161 /* Preparation: none.
7162 Insn: unmodified svc.
7163 Cleanup: pc <- insn_addr + insn_size. */
7164
7165 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7166 instruction. */
7167 dsc->wrote_to_pc = 1;
7168
7169 /* Allow OS-specific code to override SVC handling. */
7170 if (dsc->u.svc.copy_svc_os)
7171 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7172 else
7173 {
7174 dsc->cleanup = &cleanup_svc;
7175 return 0;
7176 }
7177 }
7178
7179 static int
7180 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7181 struct regcache *regs, struct displaced_step_closure *dsc)
7182 {
7183
7184 if (debug_displaced)
7185 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7186 (unsigned long) insn);
7187
7188 dsc->modinsn[0] = insn;
7189
7190 return install_svc (gdbarch, regs, dsc);
7191 }
7192
7193 static int
7194 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7195 struct regcache *regs, struct displaced_step_closure *dsc)
7196 {
7197
7198 if (debug_displaced)
7199 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7200 insn);
7201
7202 dsc->modinsn[0] = insn;
7203
7204 return install_svc (gdbarch, regs, dsc);
7205 }
7206
7207 /* Copy undefined instructions. */
7208
7209 static int
7210 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7211 struct displaced_step_closure *dsc)
7212 {
7213 if (debug_displaced)
7214 fprintf_unfiltered (gdb_stdlog,
7215 "displaced: copying undefined insn %.8lx\n",
7216 (unsigned long) insn);
7217
7218 dsc->modinsn[0] = insn;
7219
7220 return 0;
7221 }
7222
7223 static int
7224 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7225 struct displaced_step_closure *dsc)
7226 {
7227
7228 if (debug_displaced)
7229 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7230 "%.4x %.4x\n", (unsigned short) insn1,
7231 (unsigned short) insn2);
7232
7233 dsc->modinsn[0] = insn1;
7234 dsc->modinsn[1] = insn2;
7235 dsc->numinsns = 2;
7236
7237 return 0;
7238 }
7239
7240 /* Copy unpredictable instructions. */
7241
7242 static int
7243 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7244 struct displaced_step_closure *dsc)
7245 {
7246 if (debug_displaced)
7247 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7248 "%.8lx\n", (unsigned long) insn);
7249
7250 dsc->modinsn[0] = insn;
7251
7252 return 0;
7253 }
7254
7255 /* The decode_* functions are instruction decoding helpers. They mostly follow
7256 the presentation in the ARM ARM. */
7257
/* Decode miscellaneous, memory-hint and Advanced SIMD instructions from
   the unconditional ARM encoding space and dispatch each one to the
   matching copy routine.  OP1 is insn bits 20-26, OP2 is bits 4-7 and
   RN is bits 16-19.  Returns the copy routine's status (nonzero on
   failure).  Note: the mask tests below are order-dependent; earlier,
   more specific patterns must be tried first.  */

static int
arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			      struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
				dsc);
  else if ((op1 & 0x77) == 0x41)
    return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
      else
	return arm_copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return arm_copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return arm_copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return arm_copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return arm_copy_unpred (gdbarch, insn, dsc);
      default:
	return arm_copy_undef (gdbarch, insn, dsc);
      }
  else
    return arm_copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}
7317
/* Decode instructions from the unconditional ARM encoding space.  When
   bit 27 is clear the instruction belongs to the misc/memory-hint/
   Advanced SIMD table; otherwise dispatch on bits 23-25 and bit 20 as
   shown in the switch comment below.  Returns nonzero on failure.  */

static int
arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.  */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return arm_copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return arm_copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	/* Whether Rn (bits 16-19) is the PC decides between the
	   immediate and literal forms of ldc/ldc2.  */
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
			: arm_copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
			: arm_copy_undef (gdbarch, insn, dsc);

	  default:
	    return arm_copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.  */
	return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7400
/* Decode miscellaneous instructions in dp/misc encoding space and
   dispatch to the matching copy routine.  OP2 is insn bits 4-6 and OP
   is bits 21-22.  Returns nonzero on decode failure.  */

static int
arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);

  switch (op2)
    {
    case 0x0:
      return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return arm_copy_bx_blx_reg (gdbarch, insn,
				    regs, dsc);  /* blx register.  */
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
	/* Not really supported.  */
	return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
      else
	/* Previously this case fell through into the default label when
	   OP was neither 1 nor 3; make the undefined case explicit.  */
	return arm_copy_undef (gdbarch, insn, dsc);

    default:
      return arm_copy_undef (gdbarch, insn, dsc);
    }
}
7453
/* Decode data-processing (immediate and register) and miscellaneous
   instructions and dispatch to the matching copy routine.  Returns
   nonzero on decode failure.  */

static int
arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
		    struct regcache *regs,
		    struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return arm_copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return arm_copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				     dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7499
/* Decode load/store word and unsigned byte instructions and copy them
   via arm_copy_ldr_str_ldrb_strb.  A is bit 25 (register vs immediate
   offset form), B is bit 4, OP1 is bits 20-24.  The three trailing
   integer arguments appear to be (load, size, user-mode access) —
   TODO: confirm against arm_copy_ldr_str_ldrb_strb's parameter list.
   Returns nonzero on decode failure.  */

static int
arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			     struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.  */
  return 1;
}
7537
/* Decode media instructions (parallel add/sub, pack/unpack/saturate/
   reverse, usad8/usada8 and the bit-field operations sbfx/bfc/bfi/ubfx)
   and dispatch to the matching copy routine.  Dispatches on insn bits
   20-24; returns nonzero on decode failure.  */

static int
arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
		  struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return arm_copy_unmodified (gdbarch, insn,
				  "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  /* Rd == PC (bits 12-15) distinguishes usad8 from usada8.  */
	  if (bits (insn, 12, 15) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  /* Rn == PC (bits 0-3) distinguishes bfc from bfi.  */
	  if (bits (insn, 0, 3) == 0xf)
	    return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return arm_copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return arm_copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7593
static int
arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
			struct regcache *regs,
			struct displaced_step_closure *dsc)
{
  /* Bit 25 set selects B/BL/BLX; clear selects LDM/STM.  */
  return (bit (insn, 25)
	  ? arm_copy_b_bl_blx (gdbarch, insn, regs, dsc)
	  : arm_copy_block_xfer (gdbarch, insn, regs, dsc));
}
7604
/* Decode VFP/Neon extension register load/store instructions and
   dispatch to the matching copy routine.  Dispatches on insn bits
   20-24; returns nonzero on decode failure.  */

static int
arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7635
7636 /* Decode shifted register instructions. */
7637
static int
thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
			    uint16_t insn2, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  /* PC is only allowed to be used in instruction MOV.  */

  unsigned int rn = bits (insn1, 0, 3);
  unsigned int op = bits (insn1, 5, 8);

  /* Anything other than MOV (op == 2, Rn == PC) is copied unmodified.  */
  if (op != 0x2 || rn != 0xf)
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					"dp (shift reg)", dsc);

  return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
}
7654
7655
7656 /* Decode extension register load/store. Exactly the same as
7657 arm_decode_ext_reg_ld_st. */
7658
/* Decode Thumb-2 VFP/Neon extension register load/store instructions
   and dispatch to the matching copy routine.  Exactly the same table
   as arm_decode_ext_reg_ld_st, but the opcode lives in bits 4-8 of
   the first halfword.  Returns nonzero on decode failure.  */

static int
thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
			     uint16_t insn2, struct regcache *regs,
			     struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn1, 4, 8);

  switch (opcode)
    {
    case 0x04: case 0x05:
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vmov", dsc);

    case 0x08: case 0x0c:  /* 01x00 */
    case 0x0a: case 0x0e:  /* 01x10 */
    case 0x12: case 0x16:  /* 10x10 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0d:  /* 01x01 */
    case 0x0b: case 0x0f:  /* 01x11 */
    case 0x13: case 0x17:  /* 10x11 */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					  "vstr", dsc);
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}
7694
7695 static int
7696 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7697 struct regcache *regs, struct displaced_step_closure *dsc)
7698 {
7699 unsigned int op1 = bits (insn, 20, 25);
7700 int op = bit (insn, 4);
7701 unsigned int coproc = bits (insn, 8, 11);
7702 unsigned int rn = bits (insn, 16, 19);
7703
7704 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7705 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7706 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7707 && (coproc & 0xe) != 0xa)
7708 /* stc/stc2. */
7709 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7710 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7711 && (coproc & 0xe) != 0xa)
7712 /* ldc/ldc2 imm/lit. */
7713 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7714 else if ((op1 & 0x3e) == 0x00)
7715 return arm_copy_undef (gdbarch, insn, dsc);
7716 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7717 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7718 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7719 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7720 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7721 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7722 else if ((op1 & 0x30) == 0x20 && !op)
7723 {
7724 if ((coproc & 0xe) == 0xa)
7725 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7726 else
7727 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7728 }
7729 else if ((op1 & 0x30) == 0x20 && op)
7730 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7731 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7732 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7733 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7734 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7735 else if ((op1 & 0x30) == 0x30)
7736 return arm_copy_svc (gdbarch, insn, regs, dsc);
7737 else
7738 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7739 }
7740
/* Decode Thumb-2 SVC and coprocessor instructions and dispatch to the
   matching copy routine.  Returns nonzero on decode failure.  */

static int
thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
			 uint16_t insn2, struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  unsigned int coproc = bits (insn2, 8, 11);
  unsigned int bit_5_8 = bits (insn1, 5, 8);
  unsigned int bit_9 = bit (insn1, 9);
  unsigned int bit_4 = bit (insn1, 4);

  if (bit_9 == 0)
    {
      if (bit_5_8 == 2)
	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
					    dsc);
      else if (bit_5_8 == 0)  /* UNDEFINED.  */
	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      else
	{
	   /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
	  if ((coproc & 0xe) == 0xa)
	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
						dsc);
	  else  /* coproc is not 101x.  */
	    {
	      if (bit_4 == 0)  /* STC/STC2.  */
		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
						    "stc/stc2", dsc);
	      else  /* LDC/LDC2 {literal, immediate}.  */
		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
						     regs, dsc);
	    }
	}
    }
  else
    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);

  return 0;
}
7783
7784 static void
7785 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7786 struct displaced_step_closure *dsc, int rd)
7787 {
7788 /* ADR Rd, #imm
7789
7790 Rewrite as:
7791
7792 Preparation: Rd <- PC
7793 Insn: ADD Rd, #imm
7794 Cleanup: Null.
7795 */
7796
7797 /* Rd <- PC */
7798 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7799 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7800 }
7801
7802 static int
7803 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7804 struct displaced_step_closure *dsc,
7805 int rd, unsigned int imm)
7806 {
7807
7808 /* Encoding T2: ADDS Rd, #imm */
7809 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7810
7811 install_pc_relative (gdbarch, regs, dsc, rd);
7812
7813 return 0;
7814 }
7815
7816 static int
7817 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7818 struct regcache *regs,
7819 struct displaced_step_closure *dsc)
7820 {
7821 unsigned int rd = bits (insn, 8, 10);
7822 unsigned int imm8 = bits (insn, 0, 7);
7823
7824 if (debug_displaced)
7825 fprintf_unfiltered (gdb_stdlog,
7826 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7827 rd, imm8, insn);
7828
7829 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7830 }
7831
/* Copy 32-bit Thumb ADR (ADR.W).  Rd is pre-loaded with the original
   PC value by install_pc_relative, and the copied instruction is
   rewritten as an ADD or SUB immediate on Rd so the PC-relative
   offset is applied to that value.  Note: the T2/T3 labels below on
   the if/else refer to the *ADR* encodings, while the labels inside
   the branches refer to the *ADD/SUB (immediate)* encodings of the
   replacement instruction.  */

static int
thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
			      uint16_t insn2, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  unsigned int rd = bits (insn2, 8, 11);
  /* The immediate has the same encoding in ADR, ADD and SUB, so we
     extract the raw immediate encoding rather than computing the
     immediate; when generating the ADD or SUB instruction, a simple OR
     sets the immediate into it.  */
  unsigned int imm_3_8 = insn2 & 0x70ff;
  unsigned int imm_i = insn1 & 0x0400;  /* Clear all bits except bit 10.  */

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
			rd, imm_i, imm_3_8, insn1, insn2);

  if (bit (insn1, 7))  /* ADR encoding T2 (the "minus" form).  */
    {
      /* Rewrite as SUB (immediate), encoding T3: SUB Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  else  /* ADR encoding T3 (the "plus" form).  */
    {
      /* Rewrite as ADD (immediate), encoding T3: ADD Rd, Rd, #imm.  */
      dsc->modinsn[0] = (0xf100 | rd | imm_i);
      dsc->modinsn[1] = ((rd << 8) | imm_3_8);
    }
  dsc->numinsns = 2;

  install_pc_relative (gdbarch, regs, dsc, rd);

  return 0;
}
7868
7869 static int
7870 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7871 struct regcache *regs,
7872 struct displaced_step_closure *dsc)
7873 {
7874 unsigned int rt = bits (insn1, 8, 10);
7875 unsigned int pc;
7876 int imm8 = (bits (insn1, 0, 7) << 2);
7877 CORE_ADDR from = dsc->insn_addr;
7878
7879 /* LDR Rd, #imm8
7880
7881 Rwrite as:
7882
7883 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
7884
7885 Insn: LDR R0, [R2, R3];
7886 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
7887
7888 if (debug_displaced)
7889 fprintf_unfiltered (gdb_stdlog,
7890 "displaced: copying thumb ldr r%d [pc #%d]\n"
7891 , rt, imm8);
7892
7893 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
7894 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
7895 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
7896 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7897 /* The assembler calculates the required value of the offset from the
7898 Align(PC,4) value of this instruction to the label. */
7899 pc = pc & 0xfffffffc;
7900
7901 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
7902 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
7903
7904 dsc->rd = rt;
7905 dsc->u.ldst.xfersize = 4;
7906 dsc->u.ldst.rn = 0;
7907 dsc->u.ldst.immed = 0;
7908 dsc->u.ldst.writeback = 0;
7909 dsc->u.ldst.restore_r4 = 0;
7910
7911 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3]*/
7912
7913 dsc->cleanup = &cleanup_load;
7914
7915 return 0;
7916 }
7917
/* Copy Thumb cbnz/cbz instruction.  The register test is resolved at
   copy time: the copied instruction is replaced by a NOP and
   cleanup_branch performs the branch (or not) afterwards.  */

static int
thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
		     struct regcache *regs,
		     struct displaced_step_closure *dsc)
{
  int non_zero = bit (insn1, 11);	/* 1 for CBNZ, 0 for CBZ.  */
  /* Branch offset: i:imm5:'0' (bit 9 is i, bits 3-7 are imm5).  */
  unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
  CORE_ADDR from = dsc->insn_addr;
  int rn = bits (insn1, 0, 2);
  int rn_val = displaced_read_reg (regs, dsc, rn);

  dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
  /* CBNZ and CBZ do not affect the condition flags.  If condition is true,
     set it INST_AL, so cleanup_branch will know branch is taken, otherwise,
     condition is false, let it be, cleanup_branch will do nothing.  */
  if (dsc->u.branch.cond)
    {
      dsc->u.branch.cond = INST_AL;
      dsc->u.branch.dest = from + 4 + imm5;
    }
  else
    dsc->u.branch.dest = from + 2;

  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
			" insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
			rn, rn_val, insn1, dsc->u.branch.dest);

  dsc->modinsn[0] = THUMB_NOP;

  dsc->cleanup = &cleanup_branch;
  return 0;
}
7956
/* Copy Table Branch Byte/Halfword (TBB/TBH).  The table lookup is
   performed here at copy time and the resulting branch is installed
   for cleanup_branch to take.  */
static int
thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
			  uint16_t insn2, struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  ULONGEST rn_val, rm_val;
  int is_tbh = bit (insn2, 4);	/* 1 for TBH, 0 for TBB.  */
  CORE_ADDR halfwords = 0;
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* Rn is the table base, Rm the index.  */
  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));

  /* NOTE(review): the return value of target_read_memory is not
     checked below; on a failed read BUF is used uninitialized.
     Consider checking it — verify what the decode-error convention
     upstream would do with a failure here.  */
  if (is_tbh)
    {
      gdb_byte buf[2];

      target_read_memory (rn_val + 2 * rm_val, buf, 2);
      halfwords = extract_unsigned_integer (buf, 2, byte_order);
    }
  else
    {
      gdb_byte buf[1];

      target_read_memory (rn_val + rm_val, buf, 1);
      halfwords = extract_unsigned_integer (buf, 1, byte_order);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
			" offset 0x%x\n", is_tbh ? "tbh" : "tbb",
			(unsigned int) rn_val, (unsigned int) rm_val,
			(unsigned int) halfwords);

  /* The branch target is the next instruction plus twice the table
     entry (the table stores halfword offsets).  */
  dsc->u.branch.cond = INST_AL;
  dsc->u.branch.link = 0;
  dsc->u.branch.exchange = 0;
  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;

  dsc->cleanup = &cleanup_branch;

  return 0;
}
8001
8002 static void
8003 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
8004 struct displaced_step_closure *dsc)
8005 {
8006 /* PC <- r7 */
8007 int val = displaced_read_reg (regs, dsc, 7);
8008 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8009
8010 /* r7 <- r8 */
8011 val = displaced_read_reg (regs, dsc, 8);
8012 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8013
8014 /* r8 <- tmp[0] */
8015 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8016
8017 }
8018
8019 static int
8020 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8021 struct regcache *regs,
8022 struct displaced_step_closure *dsc)
8023 {
8024 dsc->u.block.regmask = insn1 & 0x00ff;
8025
8026 /* Rewrite instruction: POP {rX, rY, ...,rZ, PC}
8027 to :
8028
8029 (1) register list is full, that is, r0-r7 are used.
8030 Prepare: tmp[0] <- r8
8031
8032 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8033 MOV r8, r7; Move value of r7 to r8;
8034 POP {r7}; Store PC value into r7.
8035
8036 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8037
8038 (2) register list is not full, supposing there are N registers in
8039 register list (except PC, 0 <= N <= 7).
8040 Prepare: for each i, 0 - N, tmp[i] <- ri.
8041
8042 POP {r0, r1, ...., rN};
8043
8044 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8045 from tmp[] properly.
8046 */
8047 if (debug_displaced)
8048 fprintf_unfiltered (gdb_stdlog,
8049 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8050 dsc->u.block.regmask, insn1);
8051
8052 if (dsc->u.block.regmask == 0xff)
8053 {
8054 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8055
8056 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8057 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8058 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8059
8060 dsc->numinsns = 3;
8061 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8062 }
8063 else
8064 {
8065 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8066 unsigned int new_regmask, bit = 1;
8067 unsigned int to = 0, from = 0, i, new_rn;
8068
8069 for (i = 0; i < num_in_list + 1; i++)
8070 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8071
8072 new_regmask = (1 << (num_in_list + 1)) - 1;
8073
8074 if (debug_displaced)
8075 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8076 "{..., pc}: original reg list %.4x,"
8077 " modified list %.4x\n"),
8078 (int) dsc->u.block.regmask, new_regmask);
8079
8080 dsc->u.block.regmask |= 0x8000;
8081 dsc->u.block.writeback = 0;
8082 dsc->u.block.cond = INST_AL;
8083
8084 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8085
8086 dsc->cleanup = &cleanup_block_load_pc;
8087 }
8088
8089 return 0;
8090 }
8091
/* Decode a 16-bit Thumb instruction and copy it for displaced
   stepping.  Dispatches on opcode bits 12-15 (and, within some groups,
   bits 10-11) to the matching thumb_copy_* routine; calls
   internal_error if the instruction cannot be decoded.  */

static void
thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
  unsigned short op_bit_12_15 = bits (insn1, 12, 15);
  unsigned short op_bit_10_11 = bits (insn1, 10, 11);
  int err = 0;

  /* 16-bit thumb instructions.  */
  switch (op_bit_12_15)
    {
      /* Shift (immediate), add, subtract, move and compare.  */
    case 0: case 1: case 2: case 3:
      err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					 "shift/add/sub/mov/cmp",
					 dsc);
      break;
    case 4:
      switch (op_bit_10_11)
	{
	case 0:  /* Data-processing */
	  err = thumb_copy_unmodified_16bit (gdbarch, insn1,
					     "data-processing",
					     dsc);
	  break;
	case 1:  /* Special data instructions and branch and exchange.  */
	  {
	    unsigned short op = bits (insn1, 7, 9);
	    if (op == 6 || op == 7)  /* BX or BLX */
	      err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
	    else if (bits (insn1, 6, 7) != 0)  /* ADD/MOV/CMP high registers.  */
	      err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
						 dsc);
	  }
	  break;
	default:  /* LDR (literal) */
	  err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
	}
      break;
    case 5: case 6: case 7: case 8: case 9:  /* Load/Store single data item */
      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
      break;
    case 10:
      if (op_bit_10_11 < 2)  /* Generate PC-relative address */
	err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
      else  /* Generate SP-relative address */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
      break;
    case 11:  /* Misc 16-bit instructions */
      {
	switch (bits (insn1, 8, 11))
	  {
	  case 1: case 3: case 9: case 11:  /* CBNZ, CBZ */
	    err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
	    break;
	  case 12: case 13:  /* POP */
	    if (bit (insn1, 8))  /* PC is in register list.  */
	      err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
	    break;
	  case 15:  /* If-Then, and hints */
	    if (bits (insn1, 0, 3))
	      /* If-Then makes up to four following instructions conditional.
		 IT instruction itself is not conditional, so handle it as a
		 common unmodified instruction.  */
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
						 dsc);
	    else
	      err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
	    break;
	  default:
	    err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
	  }
      }
      break;
    case 12:
      if (op_bit_10_11 < 2)  /* Store multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
      else  /* Load multiple registers */
	err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
      break;
    case 13:  /* Conditional branch and supervisor call */
      if (bits (insn1, 9, 11) != 7)  /* conditional branch */
	err = thumb_copy_b (gdbarch, insn1, dsc);
      else
	err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
      break;
    case 14:  /* Unconditional branch */
      err = thumb_copy_b (gdbarch, insn1, dsc);
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
8194
/* Decode and copy, for displaced stepping, a 32-bit Thumb-2 instruction
   from the "load byte/halfword/word and memory hints" encoding space.
   INSN1 and INSN2 are the two halfwords of the instruction.  Returns the
   result of the thumb2_copy_* / thumb_copy_unmodified_32bit helper that
   handles the instruction (zero on success).

   Cleanup relative to the original: the local ERR was declared and
   initialized but never used (every branch returns directly), so it has
   been removed.  */

static int
decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
                                 uint16_t insn1, uint16_t insn2,
                                 struct regcache *regs,
                                 struct displaced_step_closure *dsc)
{
  int rt = bits (insn2, 12, 15);  /* Target register; 0xf selects hints.  */
  int rn = bits (insn1, 0, 3);    /* Base register; 0xf means literal (PC).  */
  int op1 = bits (insn1, 7, 8);

  switch (bits (insn1, 5, 6))
    {
    case 0: /* Load byte and memory hints */
      if (rt == 0xf) /* PLD/PLI */
        {
          if (rn == 0xf)
            /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
            return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "pli/pld", dsc);
        }
      else
        {
          if (rn == 0xf) /* LDRB/LDRSB (literal) */
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             1);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrb{reg, immediate}/ldrbt",
                                                dsc);
        }

      break;
    case 1: /* Load halfword and memory hints.  */
      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
        return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                            "pld/unalloc memhint", dsc);
      else
        {
          if (rn == 0xf)
            return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
                                             2);
          else
            return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                "ldrh/ldrht", dsc);
        }
      break;
    case 2: /* Load word */
      {
        int insn2_bit_8_11 = bits (insn2, 8, 11);

        if (rn == 0xf)
          return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
        else if (op1 == 0x1) /* Encoding T3 */
          return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
                                           0, 1);
        else /* op1 == 0x0 */
          {
            if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
              /* LDR (immediate) */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, bit (insn2, 8), 1);
            else if (insn2_bit_8_11 == 0xe) /* LDRT */
              return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                  "ldrt", dsc);
            else
              /* LDR (register) */
              return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
                                               dsc, 0, 0);
          }
        break;
      }
    default:
      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
      break;
    }
  return 0;
}
8275
/* Decode the 32-bit Thumb-2 instruction formed by halfwords INSN1 and
   INSN2 for displaced stepping and dispatch to the matching
   thumb2_copy_* / thumb_copy_* helper, which fills in DSC.  Calls
   internal_error if the instruction cannot be decoded.  */

static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
                                    uint16_t insn2, struct regcache *regs,
                                    struct displaced_step_closure *dsc)
{
  int err = 0;
  unsigned short op = bit (insn2, 15);
  /* Bits 11-12 of the first halfword select the major group.  */
  unsigned int op1 = bits (insn1, 11, 12);

  switch (op1)
    {
    case 1:
      {
        switch (bits (insn1, 9, 10))
          {
          case 0:
            if (bit (insn1, 6))
              {
                /* Load/store {dual, exclusive}, table branch.  */
                if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
                    && bits (insn2, 5, 7) == 0)
                  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
                                                  dsc);
                else
                  /* PC is not allowed to use in load/store {dual, exclusive}
                     instructions.  */
                  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                     "load/store dual/ex", dsc);
              }
            else /* load/store multiple */
              {
                switch (bits (insn1, 7, 8))
                  {
                  case 0: case 3: /* SRS, RFE */
                    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                       "srs/rfe", dsc);
                    break;
                  case 1: case 2: /* LDM/STM/PUSH/POP */
                    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
                    break;
                  }
              }
            break;

          case 1:
            /* Data-processing (shift register).  */
            err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
                                              dsc);
            break;
          default: /* Coprocessor instructions.  */
            err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
            break;
          }
        break;
      }
    case 2: /* op1 = 2 */
      if (op) /* Branch and misc control.  */
        {
          if (bit (insn2, 14) /* BLX/BL */
              || bit (insn2, 12) /* Unconditional branch */
              || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
            err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
          else
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "misc ctrl", dsc);
        }
      else
        {
          if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
            {
              /* NOTE: this OP shadows the outer OP (bit 15 of INSN2).  */
              int op = bits (insn1, 4, 8);
              int rn = bits (insn1, 0, 3);
              if ((op == 0 || op == 0xa) && rn == 0xf)
                err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
                                                    regs, dsc);
              else
                err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                   "dp/pb", dsc);
            }
          else /* Data processing (modified immediate) */
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "dp/mi", dsc);
        }
      break;
    case 3: /* op1 = 3 */
      switch (bits (insn1, 9, 10))
        {
        case 0:
          if (bit (insn1, 4))
            err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
                                                   regs, dsc);
          else /* NEON Load/Store and Store single data item */
            err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                               "neon elt/struct load/store",
                                               dsc);
          break;
        case 1: /* op1 = 3, bits (9, 10) == 1 */
          switch (bits (insn1, 7, 8))
            {
            case 0: case 1: /* Data processing (register) */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "dp(reg)", dsc);
              break;
            case 2: /* Multiply and absolute difference */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "mul/mua/diff", dsc);
              break;
            case 3: /* Long multiply and divide */
              err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
                                                 "lmul/lmua", dsc);
              break;
            }
          break;
        default: /* Coprocessor instructions */
          err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
          break;
        }
      break;
    default:
      err = 1;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
                    _("thumb_process_displaced_32bit_insn: Instruction decode error"));

}
8403
8404 static void
8405 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8406 CORE_ADDR to, struct regcache *regs,
8407 struct displaced_step_closure *dsc)
8408 {
8409 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8410 uint16_t insn1
8411 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8412
8413 if (debug_displaced)
8414 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8415 "at %.8lx\n", insn1, (unsigned long) from);
8416
8417 dsc->is_thumb = 1;
8418 dsc->insn_size = thumb_insn_size (insn1);
8419 if (thumb_insn_size (insn1) == 4)
8420 {
8421 uint16_t insn2
8422 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8423 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8424 }
8425 else
8426 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8427 }
8428
/* Decode the ARM or Thumb instruction at FROM and prepare DSC for a
   displaced step whose scratch area starts at TO.  Common DSC fields
   are initialized here; the per-encoding arm_decode_* / thumb
   helpers fill in the rest.  Calls internal_error on a decode
   failure.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
                            CORE_ADDR to, struct regcache *regs,
                            struct displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  /* Thumb mode is handled entirely by the Thumb-specific decoder.  */
  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
                        "at %.8lx\n", (unsigned long) insn,
                        (unsigned long) from);

  /* A condition field of 0xf selects the unconditional instruction
     space.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
  /* Otherwise dispatch on bits <27:25> of the instruction (landing in
     bits <3:1> of the switch key) combined with bit <4> (in bit <0>
     of the key).  */
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = arm_decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
                    _("arm_process_displaced_insn: Instruction decode error"));
}
8486
/* Actually set up the scratch space for a displaced instruction.  Write
   the modified instruction(s) recorded in DSC->modinsn to the scratch
   area at TO, followed by this architecture's breakpoint instruction,
   so that GDB regains control after the copied instruction has been
   single-stepped.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
                            CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb copies are written as 16-bit units, ARM copies as 32-bit.  */
  int size = dsc->is_thumb? 2 : 4;
  const unsigned char *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
        {
          fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
          if (size == 4)
            fprintf_unfiltered (gdb_stdlog, "%.8lx",
                                dsc->modinsn[i]);
          else if (size == 2)
            fprintf_unfiltered (gdb_stdlog, "%.4x",
                                (unsigned short)dsc->modinsn[i]);

          fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
                              (unsigned long) to + offset);

        }
      write_memory_unsigned_integer (to + offset, size,
                                     byte_order_for_code,
                                     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
                        paddress (gdbarch, from), paddress (gdbarch, to));
}
8542
8543 /* Entry point for copying an instruction into scratch space for displaced
8544 stepping. */
8545
8546 struct displaced_step_closure *
8547 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8548 CORE_ADDR from, CORE_ADDR to,
8549 struct regcache *regs)
8550 {
8551 struct displaced_step_closure *dsc
8552 = xmalloc (sizeof (struct displaced_step_closure));
8553 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8554 arm_displaced_init_closure (gdbarch, from, to, dsc);
8555
8556 return dsc;
8557 }
8558
8559 /* Entry point for cleaning things up after a displaced instruction has been
8560 single-stepped. */
8561
8562 void
8563 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8564 struct displaced_step_closure *dsc,
8565 CORE_ADDR from, CORE_ADDR to,
8566 struct regcache *regs)
8567 {
8568 if (dsc->cleanup)
8569 dsc->cleanup (gdbarch, regs, dsc);
8570
8571 if (!dsc->wrote_to_pc)
8572 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8573 dsc->insn_addr + dsc->insn_size);
8574
8575 }
8576
8577 #include "bfd-in2.h"
8578 #include "libcoff.h"
8579
/* Disassemble the instruction at MEMADDR via the opcodes library,
   described by INFO.  If MEMADDR is a Thumb address, hand the
   disassembler a fake COFF Thumb symbol so it decodes Thumb rather
   than ARM encodings.  Returns the value of the print_insn_* call.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      /* One-time lazily initialized fake symbol machinery; see the
         comment below.  */
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
        {
          /* Create a fake symbol vector containing a Thumb symbol.
             This is solely so that the code in print_insn_little_arm()
             and print_insn_big_arm() in opcodes/arm-dis.c will detect
             the presence of a Thumb symbol and switch to decoding
             Thumb instructions.  */

          fake_target.flavour = bfd_target_coff_flavour;
          fake_bfd.xvec = &fake_target;
          ce.u.syment.n_sclass = C_THUMBEXTFUNC;
          csym.native = &ce;
          csym.symbol.the_bfd = &fake_bfd;
          csym.symbol.name = "fake";
          asym = (asymbol *) & csym;
        }

      /* Strip the Thumb bit from the address before disassembling.  */
      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}
8621
8622 /* The following define instruction sequences that will cause ARM
8623 cpu's to take an undefined instruction trap. These are used to
8624 signal a breakpoint to GDB.
8625
8626 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8627 modes. A different instruction is required for each mode. The ARM
8628 cpu's can also be big or little endian. Thus four different
8629 instructions are needed to support all cases.
8630
8631 Note: ARMv4 defines several new instructions that will take the
8632 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8633 not in fact add the new instructions. The new undefined
8634 instructions in ARMv4 are all instructions that had no defined
8635 behaviour in earlier chips. There is no guarantee that they will
8636 raise an exception, but may be treated as NOP's. In practice, it
   may only be safe to rely on instructions matching:
8638
8639 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8640 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8641 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8642
8643 Even this may only true if the condition predicate is true. The
8644 following use a condition predicate of ALWAYS so it is always TRUE.
8645
8646 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8647 and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.  This can be handled by the
8649 abi-specific code during establishment of the gdbarch vector. */
8650
/* ARM-mode breakpoint: a 32-bit undefined instruction matching the
   pattern described above, in little- and big-endian byte order.  */
#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
/* Thumb-mode breakpoint: a 16-bit instruction whose two bytes happen
   to be identical, so the sequence is the same for both byte orders.  */
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

/* Default breakpoint byte sequences for each mode/endianness
   combination.  */
static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8660
8661 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8662 the program counter value to determine whether a 16-bit or 32-bit
8663 breakpoint should be used. It returns a pointer to a string of
8664 bytes that encode a breakpoint instruction, stores the length of
8665 the string to *lenptr, and adjusts the program counter (if
8666 necessary) to point to the actual memory location where the
8667 breakpoint should be inserted. */
8668
8669 static const unsigned char *
8670 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8671 {
8672 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8673 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8674
8675 if (arm_pc_is_thumb (gdbarch, *pcptr))
8676 {
8677 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8678
8679 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8680 check whether we are replacing a 32-bit instruction. */
8681 if (tdep->thumb2_breakpoint != NULL)
8682 {
8683 gdb_byte buf[2];
8684 if (target_read_memory (*pcptr, buf, 2) == 0)
8685 {
8686 unsigned short inst1;
8687 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8688 if (thumb_insn_size (inst1) == 4)
8689 {
8690 *lenptr = tdep->thumb2_breakpoint_size;
8691 return tdep->thumb2_breakpoint;
8692 }
8693 }
8694 }
8695
8696 *lenptr = tdep->thumb_breakpoint_size;
8697 return tdep->thumb_breakpoint;
8698 }
8699 else
8700 {
8701 *lenptr = tdep->arm_breakpoint_size;
8702 return tdep->arm_breakpoint;
8703 }
8704 }
8705
8706 static void
8707 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8708 int *kindptr)
8709 {
8710 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8711
8712 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8713 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8714 that this is not confused with a 32-bit ARM breakpoint. */
8715 *kindptr = 3;
8716 }
8717
/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
                          gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
        {
        case ARM_FLOAT_FPA:
          {
            /* The value is in register F0 in internal format.  We need to
               extract the raw value and then convert it to the desired
               internal type.  */
            bfd_byte tmpbuf[FP_REGISTER_SIZE];

            regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
            convert_from_extended (floatformat_from_type (type), tmpbuf,
                                   valbuf, gdbarch_byte_order (gdbarch));
          }
          break;

        case ARM_FLOAT_SOFT_FPA:
        case ARM_FLOAT_SOFT_VFP:
          /* ARM_FLOAT_VFP can arise if this is a variadic function so
             not using the VFP ABI code.  */
        case ARM_FLOAT_VFP:
          /* These models return the value in the core registers,
             starting at r0 (ARM_A1_REGNUM).  */
          regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
          if (TYPE_LENGTH (type) > 4)
            regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
                                  valbuf + INT_REGISTER_SIZE);
          break;

        default:
          internal_error (__FILE__, __LINE__,
                          _("arm_extract_return_value: "
                            "Floating point model not supported"));
          break;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > INT_REGISTER_SIZE
                                   ? INT_REGISTER_SIZE : len),
                                  byte_order, tmp);
          len -= INT_REGISTER_SIZE;
          valbuf += INT_REGISTER_SIZE;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, tmpbuf);
          memcpy (valbuf, tmpbuf,
                  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
          len -= INT_REGISTER_SIZE;
          valbuf += INT_REGISTER_SIZE;
        }
    }
}
8810
8811
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  CHECK_TYPEDEF (type);

  /* In the ARM ABI, "integer" like aggregate types are returned in
     registers.  For an aggregate type to be integer like, its size
     must be less than or equal to INT_REGISTER_SIZE and the
     offset of each addressable subfield must be zero.  Note that bit
     fields are not addressable, and all addressable subfields of
     unions always start at offset zero.

     This function is based on the behaviour of GCC 2.95.1.
     See: gcc/arm.c: arm_return_in_memory() for details.

     Note: All versions of GCC before GCC 2.95.2 do not set up the
     parameters correctly for a function returning the following
     structure: struct { float f;}; This should be returned in memory,
     not a register.  Richard Earnshaw sent me a patch, but I do not
     know of any way to detect if a function like the above has been
     compiled with the correct calling convention.  */

  /* All aggregate types that won't fit in a register must be returned
     in memory.  */
  if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
    {
      return 1;
    }

  /* The AAPCS says all aggregates not larger than a word are returned
     in a register.  Only the old APCS ABI needs the "integer like"
     checks below.  */
  if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
    return 0;

  /* The only aggregate types that can be returned in a register are
     structs and unions.  Arrays must be returned in memory.  */
  code = TYPE_CODE (type);
  if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
    {
      return 1;
    }

  /* Assume all other aggregate types can be returned in a register.
     Run a check for structures, unions and arrays.  */
  nRc = 0;

  if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
    {
      int i;
      /* Need to check if this struct/union is "integer" like.  For
         this to be true, its size must be less than or equal to
         INT_REGISTER_SIZE and the offset of each addressable
         subfield must be zero.  Note that bit fields are not
         addressable, and unions always start at offset zero.  If any
         of the subfields is a floating point type, the struct/union
         cannot be an integer type.  */

      /* For each field in the object, check:
         1) Is it FP? --> yes, nRc = 1;
         2) Is it addressable (bitpos != 0) and
         not packed (bitsize == 0)?
         --> yes, nRc = 1
       */

      for (i = 0; i < TYPE_NFIELDS (type); i++)
        {
          enum type_code field_type_code;
          field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
                                                                       i)));

          /* Is it a floating point type field?  */
          if (field_type_code == TYPE_CODE_FLT)
            {
              nRc = 1;
              break;
            }

          /* If bitpos != 0, then we have to care about it.  */
          if (TYPE_FIELD_BITPOS (type, i) != 0)
            {
              /* Bitfields are not addressable.  If the field bitsize is
                 zero, then the field is not packed.  Hence it cannot be
                 a bitfield or any other packed type.  */
              if (TYPE_FIELD_BITSIZE (type, i) == 0)
                {
                  nRc = 1;
                  break;
                }
            }
        }
    }

  return nRc;
}
8913
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  This is the store-direction mirror
   of arm_extract_return_value above.  */

static void
arm_store_return_value (struct type *type, struct regcache *regs,
                        const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scratch buffer; only used by the FPA case below.  */
      char buf[MAX_REGISTER_SIZE];

      switch (gdbarch_tdep (gdbarch)->fp_model)
        {
        case ARM_FLOAT_FPA:

          /* Convert to the FPA extended format before writing F0.  */
          convert_to_extended (floatformat_from_type (type), buf, valbuf,
                               gdbarch_byte_order (gdbarch));
          regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
          break;

        case ARM_FLOAT_SOFT_FPA:
        case ARM_FLOAT_SOFT_VFP:
          /* ARM_FLOAT_VFP can arise if this is a variadic function so
             not using the VFP ABI code.  */
        case ARM_FLOAT_VFP:
          /* These models pass the value in the core registers,
             starting at r0 (ARM_A1_REGNUM).  */
          regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
          if (TYPE_LENGTH (type) > 4)
            regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
                                   valbuf + INT_REGISTER_SIZE);
          break;

        default:
          internal_error (__FILE__, __LINE__,
                          _("arm_store_return_value: Floating "
                            "point model not supported"));
          break;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= 4)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in r0.  */
          bfd_byte tmpbuf[INT_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in consecutive
             registers starting with r0.  This will always be a multiple of
             the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = ARM_A1_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= INT_REGISTER_SIZE;
              valbuf += INT_REGISTER_SIZE;
            }
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 32-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      bfd_byte tmpbuf[INT_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= INT_REGISTER_SIZE;
          valbuf += INT_REGISTER_SIZE;
        }
    }
}
9007
9008
9009 /* Handle function return values. */
9010
9011 static enum return_value_convention
9012 arm_return_value (struct gdbarch *gdbarch, struct value *function,
9013 struct type *valtype, struct regcache *regcache,
9014 gdb_byte *readbuf, const gdb_byte *writebuf)
9015 {
9016 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9017 struct type *func_type = function ? value_type (function) : NULL;
9018 enum arm_vfp_cprc_base_type vfp_base_type;
9019 int vfp_base_count;
9020
9021 if (arm_vfp_abi_for_function (gdbarch, func_type)
9022 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
9023 {
9024 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
9025 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
9026 int i;
9027 for (i = 0; i < vfp_base_count; i++)
9028 {
9029 if (reg_char == 'q')
9030 {
9031 if (writebuf)
9032 arm_neon_quad_write (gdbarch, regcache, i,
9033 writebuf + i * unit_length);
9034
9035 if (readbuf)
9036 arm_neon_quad_read (gdbarch, regcache, i,
9037 readbuf + i * unit_length);
9038 }
9039 else
9040 {
9041 char name_buf[4];
9042 int regnum;
9043
9044 sprintf (name_buf, "%c%d", reg_char, i);
9045 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9046 strlen (name_buf));
9047 if (writebuf)
9048 regcache_cooked_write (regcache, regnum,
9049 writebuf + i * unit_length);
9050 if (readbuf)
9051 regcache_cooked_read (regcache, regnum,
9052 readbuf + i * unit_length);
9053 }
9054 }
9055 return RETURN_VALUE_REGISTER_CONVENTION;
9056 }
9057
9058 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
9059 || TYPE_CODE (valtype) == TYPE_CODE_UNION
9060 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
9061 {
9062 if (tdep->struct_return == pcc_struct_return
9063 || arm_return_in_memory (gdbarch, valtype))
9064 return RETURN_VALUE_STRUCT_CONVENTION;
9065 }
9066
9067 /* AAPCS returns complex types longer than a register in memory. */
9068 if (tdep->arm_abi != ARM_ABI_APCS
9069 && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
9070 && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
9071 return RETURN_VALUE_STRUCT_CONVENTION;
9072
9073 if (writebuf)
9074 arm_store_return_value (valtype, regcache, writebuf);
9075
9076 if (readbuf)
9077 arm_extract_return_value (valtype, regcache, readbuf);
9078
9079 return RETURN_VALUE_REGISTER_CONVENTION;
9080 }
9081
9082
9083 static int
9084 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9085 {
9086 struct gdbarch *gdbarch = get_frame_arch (frame);
9087 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9088 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9089 CORE_ADDR jb_addr;
9090 char buf[INT_REGISTER_SIZE];
9091
9092 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9093
9094 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9095 INT_REGISTER_SIZE))
9096 return 0;
9097
9098 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9099 return 1;
9100 }
9101
/* Recognize GCC and GNU ld's trampolines.  If we are in a trampoline,
   return the target PC.  Otherwise return 0.  */

CORE_ADDR
arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
{
  const char *name;
  int namelen;
  CORE_ADDR start_addr;

  /* Find the starting address and name of the function containing the PC.  */
  if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
    return 0;

  /* If PC is in a Thumb call or return stub, return the address of the
     target PC, which is in a register.  The thunk functions are called
     _call_via_xx, where x is the register name.  The possible names
     are r0-r9, sl, fp, ip, sp, and lr.  ARM RealView has similar
     functions, named __ARM_call_via_r[0-7].  */
  if (strncmp (name, "_call_via_", 10) == 0
      || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
    {
      /* Use the name suffix to determine which register contains the
         target PC.  */
      static char *table[15] =
      {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
       "r8", "r9", "sl", "fp", "ip", "sp", "lr"
      };
      int regno;
      /* Every suffix in TABLE is exactly two characters long.  */
      int offset = strlen (name) - 2;

      for (regno = 0; regno <= 14; regno++)
        if (strcmp (&name[offset], table[regno]) == 0)
          return get_frame_register_unsigned (frame, regno);
    }

  /* GNU ld generates __foo_from_arm or __foo_from_thumb for
     non-interworking calls to foo.  We could decode the stubs
     to find the target but it's easier to use the symbol table.  */
  namelen = strlen (name);
  if (name[0] == '_' && name[1] == '_'
      && ((namelen > 2 + strlen ("_from_thumb")
           && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
                       strlen ("_from_thumb")) == 0)
          || (namelen > 2 + strlen ("_from_arm")
              && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
                          strlen ("_from_arm")) == 0)))
    {
      char *target_name;
      int target_len = namelen - 2;
      struct minimal_symbol *minsym;
      struct objfile *objfile;
      struct obj_section *sec;

      /* Of the two suffixes matched above, only "_from_thumb" ends in
         'b'.  */
      if (name[namelen - 1] == 'b')
        target_len -= strlen ("_from_thumb");
      else
        target_len -= strlen ("_from_arm");

      /* Strip the leading "__" and the "_from_*" suffix to recover the
         name of the call target.  */
      target_name = alloca (target_len + 1);
      memcpy (target_name, name + 2, target_len);
      target_name[target_len] = '\0';

      /* Prefer a symbol from the same objfile as the stub itself.  */
      sec = find_pc_section (pc);
      objfile = (sec == NULL) ? NULL : sec->objfile;
      minsym = lookup_minimal_symbol (target_name, NULL, objfile);
      if (minsym != NULL)
        return SYMBOL_VALUE_ADDRESS (minsym);
      else
        return 0;
    }

  return 0;			/* not a stub */
}
9176
9177 static void
9178 set_arm_command (char *args, int from_tty)
9179 {
9180 printf_unfiltered (_("\
9181 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9182 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9183 }
9184
/* Implement the "show arm" prefix command: display the current values
   of all "show arm" sub-commands.  */

static void
show_arm_command (char *args, int from_tty)
{
  cmd_show_list (showarmcmdlist, from_tty, "");
}
9190
/* Re-select the current architecture after one of the ARM-specific
   user settings (ABI, FP model, ...) has changed, so the new setting
   takes effect.  Does nothing if the current architecture is not
   ARM.  */

static void
arm_update_current_architecture (void)
{
  struct gdbarch_info info;

  /* If the current architecture is not ARM, we have nothing to do.  */
  if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm)
    return;

  /* Update the architecture.  An empty info record asks gdbarch to
     re-derive everything from the current defaults and globals.  */
  gdbarch_info_init (&info);

  if (!gdbarch_update_p (info))
    internal_error (__FILE__, __LINE__, _("could not update architecture"));
}
9206
9207 static void
9208 set_fp_model_sfunc (char *args, int from_tty,
9209 struct cmd_list_element *c)
9210 {
9211 enum arm_float_model fp_model;
9212
9213 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9214 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9215 {
9216 arm_fp_model = fp_model;
9217 break;
9218 }
9219
9220 if (fp_model == ARM_FLOAT_LAST)
9221 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9222 current_fp_model);
9223
9224 arm_update_current_architecture ();
9225 }
9226
/* Handler for "show arm fp-model".  When the setting is "auto" and
   the current architecture is ARM, also report the model that was
   actually selected for it.  */

static void
show_fp_model (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_fp_model == ARM_FLOAT_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
		      fp_model_strings[tdep->fp_model]);
  else
    fprintf_filtered (file, _("\
The current ARM floating point model is \"%s\".\n"),
		      fp_model_strings[arm_fp_model]);
}
9243
9244 static void
9245 arm_set_abi (char *args, int from_tty,
9246 struct cmd_list_element *c)
9247 {
9248 enum arm_abi_kind arm_abi;
9249
9250 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9251 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9252 {
9253 arm_abi_global = arm_abi;
9254 break;
9255 }
9256
9257 if (arm_abi == ARM_ABI_LAST)
9258 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9259 arm_abi_string);
9260
9261 arm_update_current_architecture ();
9262 }
9263
/* Handler for "show arm abi-version".  When the setting is "auto" and
   the current architecture is ARM, also report the ABI that was
   actually selected for it.  */

static void
arm_show_abi (struct ui_file *file, int from_tty,
	      struct cmd_list_element *c, const char *value)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);

  if (arm_abi_global == ARM_ABI_AUTO
      && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm)
    fprintf_filtered (file, _("\
The current ARM ABI is \"auto\" (currently \"%s\").\n"),
		      arm_abi_strings[tdep->arm_abi]);
  else
    fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
		      arm_abi_string);
}
9279
/* Handler for "show arm fallback-mode": display the execution mode
   (ARM/Thumb) assumed when no symbol information is available.  */

static void
arm_show_fallback_mode (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("The current execution mode assumed "
		      "(when symbols are unavailable) is \"%s\".\n"),
		    arm_fallback_mode_string);
}
9289
9290 static void
9291 arm_show_force_mode (struct ui_file *file, int from_tty,
9292 struct cmd_list_element *c, const char *value)
9293 {
9294 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch);
9295
9296 fprintf_filtered (file,
9297 _("The current execution mode assumed "
9298 "(even when symbols are available) is \"%s\".\n"),
9299 arm_force_mode_string);
9300 }
9301
/* If the user changes the register disassembly style used for info
   register and other commands, we have to also switch the style used
   in opcodes for disassembly output.  This function is run in the "set
   arm disassembly" command, and does that.  */

static void
set_disassembly_style_sfunc (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Propagate the newly chosen style to the opcodes library.  */
  set_disassembly_style ();
}
9313 \f
/* Return the ARM register name corresponding to register I.  Pseudo
   registers (VFP single-precision s0-s31, NEON quad q0-q15) are
   numbered after the NUM_REGS raw registers; any other register
   beyond the static name table is only available via an XML target
   description and has no name here.  */
static const char *
arm_register_name (struct gdbarch *gdbarch, int i)
{
  const int num_regs = gdbarch_num_regs (gdbarch);

  /* VFP single-precision pseudos occupy [num_regs, num_regs + 32).  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && i >= num_regs && i < num_regs + 32)
    {
      static const char *const vfp_pseudo_names[] = {
	"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
	"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
	"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
	"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
      };

      return vfp_pseudo_names[i - num_regs];
    }

  /* NEON quad pseudos follow immediately after the 32 VFP pseudos.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && i >= num_regs + 32 && i < num_regs + 32 + 16)
    {
      static const char *const neon_pseudo_names[] = {
	"q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
	"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
      };

      return neon_pseudo_names[i - num_regs - 32];
    }

  if (i >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return "";

  return arm_register_names[i];
}
9351
/* Push the register-name style chosen via "set arm disassembly"
   (the global DISASSEMBLY_STYLE) into the opcodes disassembler.  */

static void
set_disassembly_style (void)
{
  int current;

  /* Find the style that the user wants.  DISASSEMBLY_STYLE always
     points at one of the entries of VALID_DISASSEMBLY_STYLES, so
     pointer comparison suffices.  */
  for (current = 0; current < num_disassembly_options; current++)
    if (disassembly_style == valid_disassembly_styles[current])
      break;
  gdb_assert (current < num_disassembly_options);

  /* Synchronize the disassembler.  */
  set_arm_regname_option (current);
}
9366
9367 /* Test whether the coff symbol specific value corresponds to a Thumb
9368 function. */
9369
9370 static int
9371 coff_sym_is_thumb (int val)
9372 {
9373 return (val == C_THUMBEXT
9374 || val == C_THUMBSTAT
9375 || val == C_THUMBEXTFUNC
9376 || val == C_THUMBSTATFUNC
9377 || val == C_THUMBLABEL);
9378 }
9379
/* arm_coff_make_msymbol_special()
   arm_elf_make_msymbol_special()

   These functions test whether the COFF or ELF symbol corresponds to
   an address in thumb code, and set a "special" bit in a minimal
   symbol to indicate that it does.  */

static void
arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
{
  /* The ELF symbol's st_target_internal field records whether a
     branch to this symbol should switch to Thumb state.  */
  if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
      == ST_BRANCH_TO_THUMB)
    MSYMBOL_SET_SPECIAL (msym);
}
9394
/* Mark MSYM as a Thumb function if the COFF symbol-specific value VAL
   denotes one of the Thumb storage classes.  */

static void
arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
{
  if (coff_sym_is_thumb (val))
    MSYMBOL_SET_SPECIAL (msym);
}
9401
/* Cleanup function for the per-objfile mapping-symbol data (see
   arm_record_special_symbol).  Only the per-section vectors are heap
   allocated and need freeing; the containing structures live on the
   objfile obstack.  */

static void
arm_objfile_data_free (struct objfile *objfile, void *arg)
{
  struct arm_per_objfile *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
}
9411
/* Record an ARM ELF mapping symbol in a per-objfile, per-section
   table, kept sorted by address.  Per the ARM ELF specification, $a
   marks the start of an ARM-code region, $t a Thumb-code region and
   $d a data region; other '$'-prefixed special symbols are ignored
   here.  */

static void
arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
			   asymbol *sym)
{
  const char *name = bfd_asymbol_name (sym);
  struct arm_per_objfile *data;
  VEC(arm_mapping_symbol_s) **map_p;
  struct arm_mapping_symbol new_map_sym;

  gdb_assert (name[0] == '$');
  if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
    return;

  /* Lazily create the per-objfile data: one mapping-symbol vector for
     every BFD section.  */
  data = objfile_data (objfile, arm_objfile_data_key);
  if (data == NULL)
    {
      data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
			     struct arm_per_objfile);
      set_objfile_data (objfile, arm_objfile_data_key, data);
      data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
					   objfile->obfd->section_count,
					   VEC(arm_mapping_symbol_s) *);
    }
  map_p = &data->section_maps[bfd_get_section (sym)->index];

  new_map_sym.value = sym->value;
  new_map_sym.type = name[1];

  /* Assume that most mapping symbols appear in order of increasing
     value.  If they were randomly distributed, it would be faster to
     always push here and then sort at first use.  */
  if (!VEC_empty (arm_mapping_symbol_s, *map_p))
    {
      struct arm_mapping_symbol *prev_map_sym;

      prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
      if (prev_map_sym->value >= sym->value)
	{
	  /* Out-of-order symbol: binary-search for the position that
	     keeps the vector sorted and insert there.  */
	  unsigned int idx;
	  idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
				 arm_compare_mapping_symbols);
	  VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
	  return;
	}
    }

  /* Common case: append at the end.  */
  VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
}
9460
/* Implement the "write_pc" gdbarch method: write PC into the PC
   register and, in 32-bit APCS mode, keep the PSR Thumb bit in sync
   with the ARM/Thumb state of the destination address.  */

static void
arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);

  /* If necessary, set the T bit.  */
  if (arm_apcs_32)
    {
      ULONGEST val, t_bit;
      regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
      t_bit = arm_psr_thumb_bit (gdbarch);
      if (arm_pc_is_thumb (gdbarch, pc))
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val | t_bit);
      else
	regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
					val & ~t_bit);
    }
}
9481
/* Read the contents of a NEON quad register, by reading from two
   double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index of
   the quad register, in [0, 15].  */

static enum register_status
arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
		    int regnum, gdb_byte *buf)
{
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;
  enum register_status status;

  /* Quad register qN overlays double registers d(2N) and d(2N+1);
     look up the raw register number of the even half by name.  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  status = regcache_raw_read (regcache, double_regnum, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  /* The odd double register fills the other half of the 16-byte
     quad buffer.  */
  offset = 8 - offset;
  status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
  if (status != REG_VALID)
    return status;
  memcpy (buf + offset, reg_buf, 8);

  return REG_VALID;
}
9521
/* Implement the "pseudo_register_read" gdbarch method.  Pseudo
   register numbers num_regs..num_regs+31 are the VFP single-precision
   registers s0-s31; num_regs+32..num_regs+47 are the NEON quad
   registers q0-q15.  Both are synthesized from the raw
   double-precision registers.  */

static enum register_status
arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
		 int regnum, gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
  else
    {
      enum register_status status;

      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      /* sN overlays one half of d(N/2); look up the raw register
	 number of that double register by name.  */
      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      status = regcache_raw_read (regcache, double_regnum, reg_buf);
      if (status == REG_VALID)
	memcpy (buf, reg_buf + offset, 4);
      return status;
    }
}
9560
/* Store the contents of BUF to a NEON quad register, by writing to
   two double registers.  This is used to implement the quad pseudo
   registers, and for argument passing in case the quad registers are
   missing; vectors are passed in quad registers when using the VFP
   ABI, even if a NEON unit is not present.  REGNUM is the index
   of the quad register, in [0, 15].  */

static void
arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
		     int regnum, const gdb_byte *buf)
{
  char name_buf[4];
  int offset, double_regnum;

  /* Quad register qN overlays double registers d(2N) and d(2N+1);
     look up the raw register number of the even half by name.  */
  sprintf (name_buf, "d%d", regnum << 1);
  double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
					       strlen (name_buf));

  /* d0 is always the least significant half of q0.  */
  if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
    offset = 8;
  else
    offset = 0;

  /* Write each 8-byte half of BUF to its double register.  */
  regcache_raw_write (regcache, double_regnum, buf + offset);
  offset = 8 - offset;
  regcache_raw_write (regcache, double_regnum + 1, buf + offset);
}
9589
/* Implement the "pseudo_register_write" gdbarch method.  See
   arm_pseudo_read for the pseudo register number layout.  */

static void
arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		  int regnum, const gdb_byte *buf)
{
  const int num_regs = gdbarch_num_regs (gdbarch);
  char name_buf[4];
  gdb_byte reg_buf[8];
  int offset, double_regnum;

  gdb_assert (regnum >= num_regs);
  regnum -= num_regs;

  if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
    /* Quad-precision register.  */
    arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
  else
    {
      /* Single-precision register.  */
      gdb_assert (regnum < 32);

      /* s0 is always the least significant half of d0.  */
      if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
	offset = (regnum & 1) ? 0 : 4;
      else
	offset = (regnum & 1) ? 4 : 0;

      sprintf (name_buf, "d%d", regnum >> 1);
      double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
						   strlen (name_buf));

      /* Read-modify-write: overwrite only this s register's half of
	 the containing d register.  NOTE(review): the status of this
	 regcache_raw_read is ignored; if the read fails, stale bytes
	 may be written back for the untouched half — confirm whether
	 callers guarantee the register is readable here.  */
      regcache_raw_read (regcache, double_regnum, reg_buf);
      memcpy (reg_buf + offset, buf, 4);
      regcache_raw_write (regcache, double_regnum, reg_buf);
    }
}
9625
/* Return the value, in FRAME, of the register whose number is stored
   in BATON (a pointer to int).  Used for the user-register aliases
   registered at gdbarch init time.  */

static struct value *
value_of_arm_user_reg (struct frame_info *frame, const void *baton)
{
  return value_of_register (*(const int *) baton, frame);
}
9632 \f
/* OS ABI sniffer for ARM ELF files.  For binaries tagged with
   ELFOSABI_ARM, scan the note sections for an ABI tag; for anything
   else return GDB_OSABI_UNKNOWN and let the generic ELF sniffer
   decide.  */

static enum gdb_osabi
arm_elf_osabi_sniffer (bfd *abfd)
{
  unsigned int elfosabi;
  enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;

  elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];

  if (elfosabi == ELFOSABI_ARM)
    /* GNU tools use this value.  Check note sections in this case,
       as well.  */
    bfd_map_over_sections (abfd,
			   generic_elf_osabi_sniff_abi_tag_sections,
			   &osabi);

  /* Anything else will be handled by the generic ELF sniffer.  */
  return osabi;
}
9651
9652 static int
9653 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9654 struct reggroup *group)
9655 {
9656 /* FPS register's type is INT, but belongs to float_reggroup. Beside
9657 this, FPS register belongs to save_regroup, restore_reggroup, and
9658 all_reggroup, of course. */
9659 if (regnum == ARM_FPS_REGNUM)
9660 return (group == float_reggroup
9661 || group == save_reggroup
9662 || group == restore_reggroup
9663 || group == all_reggroup);
9664 else
9665 return default_register_reggroup_p (gdbarch, regnum, group);
9666 }
9667
9668 \f
/* For backward-compatibility we allow two 'g' packet lengths with
   the remote protocol depending on whether FPA registers are
   supplied.  M-profile targets do not have FPA registers, but some
   stubs already exist in the wild which use a 'g' packet which
   supplies them albeit with dummy values.  The packet format which
   includes FPA registers should be considered deprecated for
   M-profile targets.  */

static void
arm_register_g_packet_guesses (struct gdbarch *gdbarch)
{
  if (gdbarch_tdep (gdbarch)->is_m)
    {
      /* If we know from the executable this is an M-profile target,
	 cater for remote targets whose register set layout is the
	 same as the FPA layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (8 * FP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_fpa_layout);

      /* The regular M-profile layout.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + INT_REGISTER_SIZE,
				      tdesc_arm_with_m);

      /* M-profile plus M4F VFP.  */
      register_remote_g_packet_guess (gdbarch,
				      /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
				      (16 * INT_REGISTER_SIZE)
				      + (16 * VFP_REGISTER_SIZE)
				      + (2 * INT_REGISTER_SIZE),
				      tdesc_arm_with_m_vfp_d16);
    }

  /* Otherwise we don't have a useful guess.  */
}
9710
9711 \f
9712 /* Initialize the current architecture based on INFO. If possible,
9713 re-use an architecture from ARCHES, which is a list of
9714 architectures already created during this debugging session.
9715
9716 Called e.g. at program startup, when reading a core file, and when
9717 reading a binary file. */
9718
9719 static struct gdbarch *
9720 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9721 {
9722 struct gdbarch_tdep *tdep;
9723 struct gdbarch *gdbarch;
9724 struct gdbarch_list *best_arch;
9725 enum arm_abi_kind arm_abi = arm_abi_global;
9726 enum arm_float_model fp_model = arm_fp_model;
9727 struct tdesc_arch_data *tdesc_data = NULL;
9728 int i, is_m = 0;
9729 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9730 int have_neon = 0;
9731 int have_fpa_registers = 1;
9732 const struct target_desc *tdesc = info.target_desc;
9733
9734 /* If we have an object to base this architecture on, try to determine
9735 its ABI. */
9736
9737 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9738 {
9739 int ei_osabi, e_flags;
9740
9741 switch (bfd_get_flavour (info.abfd))
9742 {
9743 case bfd_target_aout_flavour:
9744 /* Assume it's an old APCS-style ABI. */
9745 arm_abi = ARM_ABI_APCS;
9746 break;
9747
9748 case bfd_target_coff_flavour:
9749 /* Assume it's an old APCS-style ABI. */
9750 /* XXX WinCE? */
9751 arm_abi = ARM_ABI_APCS;
9752 break;
9753
9754 case bfd_target_elf_flavour:
9755 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9756 e_flags = elf_elfheader (info.abfd)->e_flags;
9757
9758 if (ei_osabi == ELFOSABI_ARM)
9759 {
9760 /* GNU tools used to use this value, but do not for EABI
9761 objects. There's nowhere to tag an EABI version
9762 anyway, so assume APCS. */
9763 arm_abi = ARM_ABI_APCS;
9764 }
9765 else if (ei_osabi == ELFOSABI_NONE)
9766 {
9767 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9768 int attr_arch, attr_profile;
9769
9770 switch (eabi_ver)
9771 {
9772 case EF_ARM_EABI_UNKNOWN:
9773 /* Assume GNU tools. */
9774 arm_abi = ARM_ABI_APCS;
9775 break;
9776
9777 case EF_ARM_EABI_VER4:
9778 case EF_ARM_EABI_VER5:
9779 arm_abi = ARM_ABI_AAPCS;
9780 /* EABI binaries default to VFP float ordering.
9781 They may also contain build attributes that can
9782 be used to identify if the VFP argument-passing
9783 ABI is in use. */
9784 if (fp_model == ARM_FLOAT_AUTO)
9785 {
9786 #ifdef HAVE_ELF
9787 switch (bfd_elf_get_obj_attr_int (info.abfd,
9788 OBJ_ATTR_PROC,
9789 Tag_ABI_VFP_args))
9790 {
9791 case 0:
9792 /* "The user intended FP parameter/result
9793 passing to conform to AAPCS, base
9794 variant". */
9795 fp_model = ARM_FLOAT_SOFT_VFP;
9796 break;
9797 case 1:
9798 /* "The user intended FP parameter/result
9799 passing to conform to AAPCS, VFP
9800 variant". */
9801 fp_model = ARM_FLOAT_VFP;
9802 break;
9803 case 2:
9804 /* "The user intended FP parameter/result
9805 passing to conform to tool chain-specific
9806 conventions" - we don't know any such
9807 conventions, so leave it as "auto". */
9808 break;
9809 default:
9810 /* Attribute value not mentioned in the
9811 October 2008 ABI, so leave it as
9812 "auto". */
9813 break;
9814 }
9815 #else
9816 fp_model = ARM_FLOAT_SOFT_VFP;
9817 #endif
9818 }
9819 break;
9820
9821 default:
9822 /* Leave it as "auto". */
9823 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9824 break;
9825 }
9826
9827 #ifdef HAVE_ELF
9828 /* Detect M-profile programs. This only works if the
9829 executable file includes build attributes; GCC does
9830 copy them to the executable, but e.g. RealView does
9831 not. */
9832 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9833 Tag_CPU_arch);
9834 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
9835 OBJ_ATTR_PROC,
9836 Tag_CPU_arch_profile);
9837 /* GCC specifies the profile for v6-M; RealView only
9838 specifies the profile for architectures starting with
9839 V7 (as opposed to architectures with a tag
9840 numerically greater than TAG_CPU_ARCH_V7). */
9841 if (!tdesc_has_registers (tdesc)
9842 && (attr_arch == TAG_CPU_ARCH_V6_M
9843 || attr_arch == TAG_CPU_ARCH_V6S_M
9844 || attr_profile == 'M'))
9845 is_m = 1;
9846 #endif
9847 }
9848
9849 if (fp_model == ARM_FLOAT_AUTO)
9850 {
9851 int e_flags = elf_elfheader (info.abfd)->e_flags;
9852
9853 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9854 {
9855 case 0:
9856 /* Leave it as "auto". Strictly speaking this case
9857 means FPA, but almost nobody uses that now, and
9858 many toolchains fail to set the appropriate bits
9859 for the floating-point model they use. */
9860 break;
9861 case EF_ARM_SOFT_FLOAT:
9862 fp_model = ARM_FLOAT_SOFT_FPA;
9863 break;
9864 case EF_ARM_VFP_FLOAT:
9865 fp_model = ARM_FLOAT_VFP;
9866 break;
9867 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9868 fp_model = ARM_FLOAT_SOFT_VFP;
9869 break;
9870 }
9871 }
9872
9873 if (e_flags & EF_ARM_BE8)
9874 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9875
9876 break;
9877
9878 default:
9879 /* Leave it as "auto". */
9880 break;
9881 }
9882 }
9883
9884 /* Check any target description for validity. */
9885 if (tdesc_has_registers (tdesc))
9886 {
9887 /* For most registers we require GDB's default names; but also allow
9888 the numeric names for sp / lr / pc, as a convenience. */
9889 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
9890 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
9891 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
9892
9893 const struct tdesc_feature *feature;
9894 int valid_p;
9895
9896 feature = tdesc_find_feature (tdesc,
9897 "org.gnu.gdb.arm.core");
9898 if (feature == NULL)
9899 {
9900 feature = tdesc_find_feature (tdesc,
9901 "org.gnu.gdb.arm.m-profile");
9902 if (feature == NULL)
9903 return NULL;
9904 else
9905 is_m = 1;
9906 }
9907
9908 tdesc_data = tdesc_data_alloc ();
9909
9910 valid_p = 1;
9911 for (i = 0; i < ARM_SP_REGNUM; i++)
9912 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9913 arm_register_names[i]);
9914 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9915 ARM_SP_REGNUM,
9916 arm_sp_names);
9917 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9918 ARM_LR_REGNUM,
9919 arm_lr_names);
9920 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
9921 ARM_PC_REGNUM,
9922 arm_pc_names);
9923 if (is_m)
9924 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9925 ARM_PS_REGNUM, "xpsr");
9926 else
9927 valid_p &= tdesc_numbered_register (feature, tdesc_data,
9928 ARM_PS_REGNUM, "cpsr");
9929
9930 if (!valid_p)
9931 {
9932 tdesc_data_cleanup (tdesc_data);
9933 return NULL;
9934 }
9935
9936 feature = tdesc_find_feature (tdesc,
9937 "org.gnu.gdb.arm.fpa");
9938 if (feature != NULL)
9939 {
9940 valid_p = 1;
9941 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
9942 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
9943 arm_register_names[i]);
9944 if (!valid_p)
9945 {
9946 tdesc_data_cleanup (tdesc_data);
9947 return NULL;
9948 }
9949 }
9950 else
9951 have_fpa_registers = 0;
9952
9953 feature = tdesc_find_feature (tdesc,
9954 "org.gnu.gdb.xscale.iwmmxt");
9955 if (feature != NULL)
9956 {
9957 static const char *const iwmmxt_names[] = {
9958 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
9959 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
9960 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
9961 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
9962 };
9963
9964 valid_p = 1;
9965 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
9966 valid_p
9967 &= tdesc_numbered_register (feature, tdesc_data, i,
9968 iwmmxt_names[i - ARM_WR0_REGNUM]);
9969
9970 /* Check for the control registers, but do not fail if they
9971 are missing. */
9972 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
9973 tdesc_numbered_register (feature, tdesc_data, i,
9974 iwmmxt_names[i - ARM_WR0_REGNUM]);
9975
9976 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
9977 valid_p
9978 &= tdesc_numbered_register (feature, tdesc_data, i,
9979 iwmmxt_names[i - ARM_WR0_REGNUM]);
9980
9981 if (!valid_p)
9982 {
9983 tdesc_data_cleanup (tdesc_data);
9984 return NULL;
9985 }
9986 }
9987
9988 /* If we have a VFP unit, check whether the single precision registers
9989 are present. If not, then we will synthesize them as pseudo
9990 registers. */
9991 feature = tdesc_find_feature (tdesc,
9992 "org.gnu.gdb.arm.vfp");
9993 if (feature != NULL)
9994 {
9995 static const char *const vfp_double_names[] = {
9996 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
9997 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
9998 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
9999 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
10000 };
10001
10002 /* Require the double precision registers. There must be either
10003 16 or 32. */
10004 valid_p = 1;
10005 for (i = 0; i < 32; i++)
10006 {
10007 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10008 ARM_D0_REGNUM + i,
10009 vfp_double_names[i]);
10010 if (!valid_p)
10011 break;
10012 }
10013 if (!valid_p && i == 16)
10014 valid_p = 1;
10015
10016 /* Also require FPSCR. */
10017 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10018 ARM_FPSCR_REGNUM, "fpscr");
10019 if (!valid_p)
10020 {
10021 tdesc_data_cleanup (tdesc_data);
10022 return NULL;
10023 }
10024
10025 if (tdesc_unnumbered_register (feature, "s0") == 0)
10026 have_vfp_pseudos = 1;
10027
10028 have_vfp_registers = 1;
10029
10030 /* If we have VFP, also check for NEON. The architecture allows
10031 NEON without VFP (integer vector operations only), but GDB
10032 does not support that. */
10033 feature = tdesc_find_feature (tdesc,
10034 "org.gnu.gdb.arm.neon");
10035 if (feature != NULL)
10036 {
10037 /* NEON requires 32 double-precision registers. */
10038 if (i != 32)
10039 {
10040 tdesc_data_cleanup (tdesc_data);
10041 return NULL;
10042 }
10043
10044 /* If there are quad registers defined by the stub, use
10045 their type; otherwise (normally) provide them with
10046 the default type. */
10047 if (tdesc_unnumbered_register (feature, "q0") == 0)
10048 have_neon_pseudos = 1;
10049
10050 have_neon = 1;
10051 }
10052 }
10053 }
10054
10055 /* If there is already a candidate, use it. */
10056 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10057 best_arch != NULL;
10058 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10059 {
10060 if (arm_abi != ARM_ABI_AUTO
10061 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10062 continue;
10063
10064 if (fp_model != ARM_FLOAT_AUTO
10065 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10066 continue;
10067
10068 /* There are various other properties in tdep that we do not
10069 need to check here: those derived from a target description,
10070 since gdbarches with a different target description are
10071 automatically disqualified. */
10072
10073 /* Do check is_m, though, since it might come from the binary. */
10074 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10075 continue;
10076
10077 /* Found a match. */
10078 break;
10079 }
10080
10081 if (best_arch != NULL)
10082 {
10083 if (tdesc_data != NULL)
10084 tdesc_data_cleanup (tdesc_data);
10085 return best_arch->gdbarch;
10086 }
10087
10088 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10089 gdbarch = gdbarch_alloc (&info, tdep);
10090
10091 /* Record additional information about the architecture we are defining.
10092 These are gdbarch discriminators, like the OSABI. */
10093 tdep->arm_abi = arm_abi;
10094 tdep->fp_model = fp_model;
10095 tdep->is_m = is_m;
10096 tdep->have_fpa_registers = have_fpa_registers;
10097 tdep->have_vfp_registers = have_vfp_registers;
10098 tdep->have_vfp_pseudos = have_vfp_pseudos;
10099 tdep->have_neon_pseudos = have_neon_pseudos;
10100 tdep->have_neon = have_neon;
10101
10102 arm_register_g_packet_guesses (gdbarch);
10103
10104 /* Breakpoints. */
10105 switch (info.byte_order_for_code)
10106 {
10107 case BFD_ENDIAN_BIG:
10108 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10109 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10110 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10111 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10112
10113 break;
10114
10115 case BFD_ENDIAN_LITTLE:
10116 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10117 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10118 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10119 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10120
10121 break;
10122
10123 default:
10124 internal_error (__FILE__, __LINE__,
10125 _("arm_gdbarch_init: bad byte order for float format"));
10126 }
10127
10128 /* On ARM targets char defaults to unsigned. */
10129 set_gdbarch_char_signed (gdbarch, 0);
10130
10131 /* Note: for displaced stepping, this includes the breakpoint, and one word
10132 of additional scratch space. This setting isn't used for anything beside
10133 displaced stepping at present. */
10134 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10135
10136 /* This should be low enough for everything. */
10137 tdep->lowest_pc = 0x20;
10138 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10139
10140 /* The default, for both APCS and AAPCS, is to return small
10141 structures in registers. */
10142 tdep->struct_return = reg_struct_return;
10143
10144 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10145 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10146
10147 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10148
10149 /* Frame handling. */
10150 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10151 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10152 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10153
10154 frame_base_set_default (gdbarch, &arm_normal_base);
10155
10156 /* Address manipulation. */
10157 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address);
10158 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10159
10160 /* Advance PC across function entry code. */
10161 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10162
10163 /* Detect whether PC is in function epilogue. */
10164 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10165
10166 /* Skip trampolines. */
10167 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10168
10169 /* The stack grows downward. */
10170 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10171
10172 /* Breakpoint manipulation. */
10173 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10174 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10175 arm_remote_breakpoint_from_pc);
10176
10177 /* Information about registers, etc. */
10178 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10179 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10180 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10181 set_gdbarch_register_type (gdbarch, arm_register_type);
10182 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10183
10184 /* This "info float" is FPA-specific. Use the generic version if we
10185 do not have FPA. */
10186 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10187 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10188
10189 /* Internal <-> external register number maps. */
10190 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10191 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10192
10193 set_gdbarch_register_name (gdbarch, arm_register_name);
10194
10195 /* Returning results. */
10196 set_gdbarch_return_value (gdbarch, arm_return_value);
10197
10198 /* Disassembly. */
10199 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10200
10201 /* Minsymbol frobbing. */
10202 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10203 set_gdbarch_coff_make_msymbol_special (gdbarch,
10204 arm_coff_make_msymbol_special);
10205 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10206
10207 /* Thumb-2 IT block support. */
10208 set_gdbarch_adjust_breakpoint_address (gdbarch,
10209 arm_adjust_breakpoint_address);
10210
10211 /* Virtual tables. */
10212 set_gdbarch_vbit_in_delta (gdbarch, 1);
10213
10214 /* Hook in the ABI-specific overrides, if they have been registered. */
10215 gdbarch_init_osabi (info, gdbarch);
10216
10217 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10218
10219 /* Add some default predicates. */
10220 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10221 dwarf2_append_unwinders (gdbarch);
10222 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10223 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10224
10225 /* Now we have tuned the configuration, set a few final things,
10226 based on what the OS ABI has told us. */
10227
10228 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10229 binaries are always marked. */
10230 if (tdep->arm_abi == ARM_ABI_AUTO)
10231 tdep->arm_abi = ARM_ABI_APCS;
10232
10233 /* Watchpoints are not steppable. */
10234 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10235
10236 /* We used to default to FPA for generic ARM, but almost nobody
10237 uses that now, and we now provide a way for the user to force
10238 the model. So default to the most useful variant. */
10239 if (tdep->fp_model == ARM_FLOAT_AUTO)
10240 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10241
10242 if (tdep->jb_pc >= 0)
10243 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10244
10245 /* Floating point sizes and format. */
10246 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10247 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10248 {
10249 set_gdbarch_double_format
10250 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10251 set_gdbarch_long_double_format
10252 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10253 }
10254 else
10255 {
10256 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10257 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10258 }
10259
10260 if (have_vfp_pseudos)
10261 {
10262 /* NOTE: These are the only pseudo registers used by
10263 the ARM target at the moment. If more are added, a
10264 little more care in numbering will be needed. */
10265
10266 int num_pseudos = 32;
10267 if (have_neon_pseudos)
10268 num_pseudos += 16;
10269 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10270 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10271 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10272 }
10273
10274 if (tdesc_data)
10275 {
10276 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10277
10278 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10279
10280 /* Override tdesc_register_type to adjust the types of VFP
10281 registers for NEON. */
10282 set_gdbarch_register_type (gdbarch, arm_register_type);
10283 }
10284
10285 /* Add standard register aliases. We add aliases even for those
10286      names which are used by the current architecture - it's simpler,
10287 and does no harm, since nothing ever lists user registers. */
10288 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10289 user_reg_add (gdbarch, arm_register_aliases[i].name,
10290 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10291
10292 return gdbarch;
10293 }
10294
10295 static void
10296 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10297 {
10298 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10299
10300 if (tdep == NULL)
10301 return;
10302
10303 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10304 (unsigned long) tdep->lowest_pc);
10305 }
10306
10307 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10308
10309 void
10310 _initialize_arm_tdep (void)
10311 {
10312 struct ui_file *stb;
10313 long length;
10314 struct cmd_list_element *new_set, *new_show;
10315 const char *setname;
10316 const char *setdesc;
10317 const char *const *regnames;
10318 int numregs, i, j;
10319 static char *helptext;
10320 char regdesc[1024], *rdptr = regdesc;
10321 size_t rest = sizeof (regdesc);
10322
10323 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10324
10325 arm_objfile_data_key
10326 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10327
10328 /* Add ourselves to objfile event chain. */
10329 observer_attach_new_objfile (arm_exidx_new_objfile);
10330 arm_exidx_data_key
10331 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10332
10333 /* Register an ELF OS ABI sniffer for ARM binaries. */
10334 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10335 bfd_target_elf_flavour,
10336 arm_elf_osabi_sniffer);
10337
10338 /* Initialize the standard target descriptions. */
10339 initialize_tdesc_arm_with_m ();
10340 initialize_tdesc_arm_with_m_fpa_layout ();
10341 initialize_tdesc_arm_with_m_vfp_d16 ();
10342 initialize_tdesc_arm_with_iwmmxt ();
10343 initialize_tdesc_arm_with_vfpv2 ();
10344 initialize_tdesc_arm_with_vfpv3 ();
10345 initialize_tdesc_arm_with_neon ();
10346
10347 /* Get the number of possible sets of register names defined in opcodes. */
10348 num_disassembly_options = get_arm_regname_num_options ();
10349
10350 /* Add root prefix command for all "set arm"/"show arm" commands. */
10351 add_prefix_cmd ("arm", no_class, set_arm_command,
10352 _("Various ARM-specific commands."),
10353 &setarmcmdlist, "set arm ", 0, &setlist);
10354
10355 add_prefix_cmd ("arm", no_class, show_arm_command,
10356 _("Various ARM-specific commands."),
10357 &showarmcmdlist, "show arm ", 0, &showlist);
10358
10359 /* Sync the opcode insn printer with our register viewer. */
10360 parse_arm_disassembler_option ("reg-names-std");
10361
10362 /* Initialize the array that will be passed to
10363 add_setshow_enum_cmd(). */
10364 valid_disassembly_styles
10365 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10366 for (i = 0; i < num_disassembly_options; i++)
10367 {
10368 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10369 valid_disassembly_styles[i] = setname;
10370 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10371 rdptr += length;
10372 rest -= length;
10373 /* When we find the default names, tell the disassembler to use
10374 them. */
10375 if (!strcmp (setname, "std"))
10376 {
10377 disassembly_style = setname;
10378 set_arm_regname_option (i);
10379 }
10380 }
10381 /* Mark the end of valid options. */
10382 valid_disassembly_styles[num_disassembly_options] = NULL;
10383
10384 /* Create the help text. */
10385 stb = mem_fileopen ();
10386 fprintf_unfiltered (stb, "%s%s%s",
10387 _("The valid values are:\n"),
10388 regdesc,
10389 _("The default is \"std\"."));
10390 helptext = ui_file_xstrdup (stb, NULL);
10391 ui_file_delete (stb);
10392
10393 add_setshow_enum_cmd("disassembler", no_class,
10394 valid_disassembly_styles, &disassembly_style,
10395 _("Set the disassembly style."),
10396 _("Show the disassembly style."),
10397 helptext,
10398 set_disassembly_style_sfunc,
10399 NULL, /* FIXME: i18n: The disassembly style is
10400 \"%s\". */
10401 &setarmcmdlist, &showarmcmdlist);
10402
10403 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10404 _("Set usage of ARM 32-bit mode."),
10405 _("Show usage of ARM 32-bit mode."),
10406 _("When off, a 26-bit PC will be used."),
10407 NULL,
10408 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10409 mode is %s. */
10410 &setarmcmdlist, &showarmcmdlist);
10411
10412 /* Add a command to allow the user to force the FPU model. */
10413 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10414 _("Set the floating point type."),
10415 _("Show the floating point type."),
10416 _("auto - Determine the FP typefrom the OS-ABI.\n\
10417 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10418 fpa - FPA co-processor (GCC compiled).\n\
10419 softvfp - Software FP with pure-endian doubles.\n\
10420 vfp - VFP co-processor."),
10421 set_fp_model_sfunc, show_fp_model,
10422 &setarmcmdlist, &showarmcmdlist);
10423
10424 /* Add a command to allow the user to force the ABI. */
10425 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10426 _("Set the ABI."),
10427 _("Show the ABI."),
10428 NULL, arm_set_abi, arm_show_abi,
10429 &setarmcmdlist, &showarmcmdlist);
10430
10431 /* Add two commands to allow the user to force the assumed
10432 execution mode. */
10433 add_setshow_enum_cmd ("fallback-mode", class_support,
10434 arm_mode_strings, &arm_fallback_mode_string,
10435 _("Set the mode assumed when symbols are unavailable."),
10436 _("Show the mode assumed when symbols are unavailable."),
10437 NULL, NULL, arm_show_fallback_mode,
10438 &setarmcmdlist, &showarmcmdlist);
10439 add_setshow_enum_cmd ("force-mode", class_support,
10440 arm_mode_strings, &arm_force_mode_string,
10441 _("Set the mode assumed even when symbols are available."),
10442 _("Show the mode assumed even when symbols are available."),
10443 NULL, NULL, arm_show_force_mode,
10444 &setarmcmdlist, &showarmcmdlist);
10445
10446 /* Debugging flag. */
10447 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10448 _("Set ARM debugging."),
10449 _("Show ARM debugging."),
10450 _("When on, arm-specific debugging is enabled."),
10451 NULL,
10452 NULL, /* FIXME: i18n: "ARM debugging is %s. */
10453 &setdebuglist, &showdebuglist);
10454 }
10455
/* ARM-reversible process record data structures.  */

/* Instruction sizes, in bytes, of the three instruction encodings
   handled by ARM process record.  */
#define ARM_INSN_SIZE_BYTES 4
#define THUMB_INSN_SIZE_BYTES 2
#define THUMB2_INSN_SIZE_BYTES 4


/* Bit position of the load/store selector (L) bit in ARM
   memory-access instruction encodings (1 = load, 0 = store).  */
#define INSN_S_L_BIT_NUM 20

/* Copy the LENGTH register numbers collected in RECORD_BUF into a
   freshly XNEWVEC-allocated uint32_t array assigned to REGS; the
   caller owns the allocation.  Nothing is allocated when LENGTH is
   zero.  NOTE(review): LENGTH is evaluated twice, so arguments with
   side effects must not be passed.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
do  \
  { \
    unsigned int reg_len = LENGTH; \
    if (reg_len) \
      { \
        REGS = XNEWVEC (uint32_t, reg_len); \
        memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
      } \
  } \
while (0)
10476
/* Copy the LENGTH memory records collected in RECORD_BUF into a
   freshly XNEWVEC-allocated "struct arm_mem_r" array assigned to MEMS;
   the caller owns the allocation.  Callers fill RECORD_BUF as flat
   uint32_t pairs: RECORD_BUF[2*i] is the access length and
   RECORD_BUF[2*i + 1] the target address of record i.

   The previous implementation memcpy'd the raw uint32_t pairs over the
   struct array, which is only correct when "struct arm_mem_r" has the
   exact layout of two uint32_t's -- false whenever CORE_ADDR is 64 bits
   wide or padding is inserted.  Copy field by field instead.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
do  \
  { \
    unsigned int mem_len = LENGTH; \
    if (mem_len) \
      { \
        MEMS =  XNEWVEC (struct arm_mem_r, mem_len); \
        while (mem_len) \
          { \
            mem_len--; \
            (MEMS)[mem_len].len = (RECORD_BUF)[2 * mem_len]; \
            (MEMS)[mem_len].addr = (RECORD_BUF)[2 * mem_len + 1]; \
          } \
      } \
  } \
while (0)
10489
/* True (non-zero) when ARM_RECORD already carries register or memory
   records, i.e. the instruction has already been decoded.  */
#define INSN_RECORDED(ARM_RECORD) \
(0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)

/* ARM memory record structure: one memory region that the recorded
   instruction is about to overwrite.  */
struct arm_mem_r
{
  uint32_t len;    /* Length of the access, in bytes.  */
  CORE_ADDR addr;  /* Starting memory address.  */
};

/* ARM instruction record contains opcode of current insn
   and execution state (before entry to decode_insn()),
   contains list of to-be-modified registers and
   memory blocks (on return from decode_insn()).  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;          /* Address of the insn being decoded.  */
  uint32_t arm_insn;            /* Raw instruction; wide enough for Thumb too.  */
  uint32_t cond;                /* Condition code field.  */
  uint32_t opcode;              /* Insn opcode bits.  */
  uint32_t decode;              /* Insn decode bits.  */
  uint32_t mem_rec_count;       /* Number of entries in arm_mems.  */
  uint32_t reg_rec_count;       /* Number of entries in arm_regs.  */
  uint32_t *arm_regs;           /* Registers to be saved for this record.  */
  struct arm_mem_r *arm_mems;   /* Memory to be saved for this record.  */
} insn_decode_record;
10520
10521
/* Check an ARM SBO/SBZ mandatory field: the LEN-bit wide field of INSN
   starting at 1-based bit position BIT_NUM must contain all ones when
   SBO is non-zero ("should be one") or all zeros when SBO is zero
   ("should be zero").  Return 1 when the field is valid, 0 otherwise.
   A zero-length field is trivially valid.  */

static int
sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
{
  uint32_t ones = bits (insn, bit_num - 1, (bit_num - 1) + (len - 1));

  if (!len)
    return 1;

  /* For an SBZ field, invert so that in both cases a valid field scans
     below as a run of one bits.  (The inversion also sets every bit
     above the field, which is exactly what a valid SBZ field needs for
     the scan to reach the end without tripping.)  */
  if (!sbo)
    ones = ~ones;

  while (ones)
    {
      /* Test the current low bit.  The old test, "ones & sbo", was an
	 and with zero whenever SBO == 0, which made the function
	 unconditionally reject every should-be-zero field.  */
      if (!(ones & 1))
	{
	  return 0;
	}
      ones = ones >> 1;
    }
  return 1;
}
10545
/* Selector for the two miscellaneous-store encodings handled by
   arm_record_strx.  */
typedef enum
{
  ARM_RECORD_STRH=1,
  ARM_RECORD_STRD
} arm_record_strx_t;

/* Instruction encoding in effect for the insn being recorded.  */
typedef enum
{
  ARM_RECORD=1,
  THUMB_RECORD,
  THUMB2_RECORD
} record_type_t;
10558
10559
10560 static int
10561 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
10562 uint32_t *record_buf_mem, arm_record_strx_t str_type)
10563 {
10564
10565 struct regcache *reg_cache = arm_insn_r->regcache;
10566 ULONGEST u_regval[2]= {0};
10567
10568 uint32_t reg_src1 = 0, reg_src2 = 0;
10569 uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
10570 uint32_t opcode1 = 0;
10571
10572 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10573 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10574 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
10575
10576
10577 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
10578 {
10579 /* 1) Handle misc store, immediate offset. */
10580 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10581 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10582 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10583 regcache_raw_read_unsigned (reg_cache, reg_src1,
10584 &u_regval[0]);
10585 if (ARM_PC_REGNUM == reg_src1)
10586 {
10587 /* If R15 was used as Rn, hence current PC+8. */
10588 u_regval[0] = u_regval[0] + 8;
10589 }
10590 offset_8 = (immed_high << 4) | immed_low;
10591 /* Calculate target store address. */
10592 if (14 == arm_insn_r->opcode)
10593 {
10594 tgt_mem_addr = u_regval[0] + offset_8;
10595 }
10596 else
10597 {
10598 tgt_mem_addr = u_regval[0] - offset_8;
10599 }
10600 if (ARM_RECORD_STRH == str_type)
10601 {
10602 record_buf_mem[0] = 2;
10603 record_buf_mem[1] = tgt_mem_addr;
10604 arm_insn_r->mem_rec_count = 1;
10605 }
10606 else if (ARM_RECORD_STRD == str_type)
10607 {
10608 record_buf_mem[0] = 4;
10609 record_buf_mem[1] = tgt_mem_addr;
10610 record_buf_mem[2] = 4;
10611 record_buf_mem[3] = tgt_mem_addr + 4;
10612 arm_insn_r->mem_rec_count = 2;
10613 }
10614 }
10615 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
10616 {
10617 /* 2) Store, register offset. */
10618 /* Get Rm. */
10619 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10620 /* Get Rn. */
10621 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10622 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10623 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10624 if (15 == reg_src2)
10625 {
10626 /* If R15 was used as Rn, hence current PC+8. */
10627 u_regval[0] = u_regval[0] + 8;
10628 }
10629 /* Calculate target store address, Rn +/- Rm, register offset. */
10630 if (12 == arm_insn_r->opcode)
10631 {
10632 tgt_mem_addr = u_regval[0] + u_regval[1];
10633 }
10634 else
10635 {
10636 tgt_mem_addr = u_regval[1] - u_regval[0];
10637 }
10638 if (ARM_RECORD_STRH == str_type)
10639 {
10640 record_buf_mem[0] = 2;
10641 record_buf_mem[1] = tgt_mem_addr;
10642 arm_insn_r->mem_rec_count = 1;
10643 }
10644 else if (ARM_RECORD_STRD == str_type)
10645 {
10646 record_buf_mem[0] = 4;
10647 record_buf_mem[1] = tgt_mem_addr;
10648 record_buf_mem[2] = 4;
10649 record_buf_mem[3] = tgt_mem_addr + 4;
10650 arm_insn_r->mem_rec_count = 2;
10651 }
10652 }
10653 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10654 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10655 {
10656 /* 3) Store, immediate pre-indexed. */
10657 /* 5) Store, immediate post-indexed. */
10658 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10659 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10660 offset_8 = (immed_high << 4) | immed_low;
10661 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10662 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10663 /* Calculate target store address, Rn +/- Rm, register offset. */
10664 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10665 {
10666 tgt_mem_addr = u_regval[0] + offset_8;
10667 }
10668 else
10669 {
10670 tgt_mem_addr = u_regval[0] - offset_8;
10671 }
10672 if (ARM_RECORD_STRH == str_type)
10673 {
10674 record_buf_mem[0] = 2;
10675 record_buf_mem[1] = tgt_mem_addr;
10676 arm_insn_r->mem_rec_count = 1;
10677 }
10678 else if (ARM_RECORD_STRD == str_type)
10679 {
10680 record_buf_mem[0] = 4;
10681 record_buf_mem[1] = tgt_mem_addr;
10682 record_buf_mem[2] = 4;
10683 record_buf_mem[3] = tgt_mem_addr + 4;
10684 arm_insn_r->mem_rec_count = 2;
10685 }
10686 /* Record Rn also as it changes. */
10687 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10688 arm_insn_r->reg_rec_count = 1;
10689 }
10690 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
10691 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10692 {
10693 /* 4) Store, register pre-indexed. */
10694 /* 6) Store, register post -indexed. */
10695 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10696 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10697 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10698 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10699 /* Calculate target store address, Rn +/- Rm, register offset. */
10700 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10701 {
10702 tgt_mem_addr = u_regval[0] + u_regval[1];
10703 }
10704 else
10705 {
10706 tgt_mem_addr = u_regval[1] - u_regval[0];
10707 }
10708 if (ARM_RECORD_STRH == str_type)
10709 {
10710 record_buf_mem[0] = 2;
10711 record_buf_mem[1] = tgt_mem_addr;
10712 arm_insn_r->mem_rec_count = 1;
10713 }
10714 else if (ARM_RECORD_STRD == str_type)
10715 {
10716 record_buf_mem[0] = 4;
10717 record_buf_mem[1] = tgt_mem_addr;
10718 record_buf_mem[2] = 4;
10719 record_buf_mem[3] = tgt_mem_addr + 4;
10720 arm_insn_r->mem_rec_count = 2;
10721 }
10722 /* Record Rn also as it changes. */
10723 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10724 arm_insn_r->reg_rec_count = 1;
10725 }
10726 return 0;
10727 }
10728
/* Handling ARM extension space insns.  */

/* Record the side effects of instructions living in the ARM "extension
   space" encodings (unconditional, arithmetic, control and load/store
   extension spaces).  On entry ARM_INSN_R must carry no records yet
   (asserted below); each recognized group fills record_buf /
   record_buf_mem, which are copied into ARM_INSN_R at the end.
   Returns 0 on success, -1 for encodings that cannot be recorded
   yet.  */

static int
arm_record_extension_space (insn_decode_record *arm_insn_r)
{
  /* NOTE(review): declared uint32_t but assigned -1 below; the value
     wraps to 0xFFFFFFFF and converts back to -1 at the int return --
     works on two's complement, but an int would be cleaner.  */
  uint32_t ret = 0;  /* Return value: -1:record failure ;  0:success  */
  uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
  uint32_t record_buf[8], record_buf_mem[8];
  uint32_t reg_src1 = 0;
  /* NOTE(review): immed_high, immed_low, offset_8 and tgt_mem_addr are
     never used in this function.  */
  uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
  struct regcache *reg_cache = arm_insn_r->regcache;
  ULONGEST u_regval = 0;

  gdb_assert (!INSN_RECORDED(arm_insn_r));
  /* Handle unconditional insn extension space.  */

  opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  /* NOTE(review): "cond" non-zero is used here as a proxy for the
     unconditional (cond == 0xF) encoding space -- confirm against the
     caller's decode, since a plain EQ-conditional insn has cond == 0.  */
  if (arm_insn_r->cond)
    {
      /* PLD has no affect on architectural state, it just affects
	 the caches.  */
      if (5 == ((opcode1 & 0xE0) >> 5))
	{
	  /* BLX(1): writes the flags and the return address.  */
	  record_buf[0] = ARM_PS_REGNUM;
	  record_buf[1] = ARM_LR_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
      /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn.  */
    }


  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
    {
      ret = -1;
      /* Undefined instruction on ARM V5; need to handle if later
	 versions define it.  */
    }

  opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
  insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);

  /* Handle arithmetic insn extension space.  */
  if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* Handle MLA(S) and MUL(S).  */
      if (0 <= insn_op1 && 3 >= insn_op1)
      {
	/* Destination register and the flags change.  */
	record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	record_buf[1] = ARM_PS_REGNUM;
	arm_insn_r->reg_rec_count = 2;
      }
      else if (4 <= insn_op1 && 15 >= insn_op1)
      {
	/* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S): the 64-bit
	   result occupies RdHi:RdLo, and the flags may change.  */
	record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
	record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	record_buf[2] = ARM_PS_REGNUM;
	arm_insn_r->reg_rec_count = 3;
      }
    }

  opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);

  /* Handle control insn extension space.  */

  if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
      && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
    {
      if (!bit (arm_insn_r->arm_insn,25))
	{
	  if (!bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if ((0 == insn_op1) || (2 == insn_op1))
		{
		  /* MRS: destination register receives the status word.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (1 == insn_op1)
		{
		  /* CSPR is going to be changed.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* SPSR is going to be changed.  */
		  /* We need to get SPSR value, which is yet to be done.  */
		  printf_unfiltered (_("Process record does not support "
				       "instruction  0x%0x at address %s.\n"),
				     arm_insn_r->arm_insn,
				     paddress (arm_insn_r->gdbarch,
					       arm_insn_r->this_addr));
		  return -1;
		}
	    }
	  else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      if (1 == insn_op1)
		{
		  /* BX: only the flags (T bit) change architecturally.  */
		  record_buf[0] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 1;
		}
	      else if (3 == insn_op1)
		{
		  /* CLZ: destination register only.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	  else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BLX: flags and the return address in LR.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* QADD, QSUB, QDADD, QDSUB: flags (Q bit) and Rd.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
	      arm_insn_r->reg_rec_count = 2;
	    }
	  else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
	    {
	      /* BKPT.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      record_buf[1] = ARM_LR_REGNUM;
	      arm_insn_r->reg_rec_count = 2;

	      /* Save SPSR also; how?  Unsupported until SPSR access
		 is implemented.  */
	      printf_unfiltered (_("Process record does not support "
				   "instruction 0x%0x at address %s.\n"),
				 arm_insn_r->arm_insn,
				 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
	      return -1;
	    }
	  else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 10 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 12 == bits (arm_insn_r->arm_insn, 4, 7)
		  || 14 == bits (arm_insn_r->arm_insn, 4, 7)
		 )
	    {
	      if (0 == insn_op1 || 1 == insn_op1)
		{
		  /* SMLA<x><y>, SMLAW<y>, SMULW<y>.  */
		  /* We dont do optimization for SMULW<y> where we
		     need only Rd.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = ARM_PS_REGNUM;
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (2 == insn_op1)
		{
		  /* SMLAL<x><y>: 64-bit accumulator in RdHi:RdLo.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
		  arm_insn_r->reg_rec_count = 2;
		}
	      else if (3 == insn_op1)
		{
		  /* SMUL<x><y>: destination register only.  */
		  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
		  arm_insn_r->reg_rec_count = 1;
		}
	    }
	}
      else
	{
	  /* MSR : immediate form.  */
	  if (1 == insn_op1)
	    {
	      /* CSPR is going to be changed.  */
	      record_buf[0] = ARM_PS_REGNUM;
	      arm_insn_r->reg_rec_count = 1;
	    }
	  else if (3 == insn_op1)
	    {
	      /* SPSR is going to be changed.  */
	      /* we need to get SPSR value, which is yet to be done  */
	      printf_unfiltered (_("Process record does not support "
				   "instruction 0x%0x at address %s.\n"),
				 arm_insn_r->arm_insn,
				 paddress (arm_insn_r->gdbarch,
					   arm_insn_r->this_addr));
	      return -1;
	    }
	}
    }

  opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
  opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
  insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);

  /* Handle load/store insn extension space.  */

  if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
      && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
      && !INSN_RECORDED(arm_insn_r))
    {
      /* SWP/SWPB.  */
      if (0 == insn_op1)
	{
	  /* These insn, changes register and memory as well.  */
	  /* SWP or SWPB insn.  */
	  /* Get memory address given by Rn.  */
	  reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
	  regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
	  /* SWP insn ?, swaps word.  NOTE(review): arm_insn_r->opcode
	     is not assigned anywhere in this function, so this relies
	     on the caller having set it -- verify.  */
	  if (8 == arm_insn_r->opcode)
	    {
	      record_buf_mem[0] = 4;
	    }
	  else
	    {
	      /* SWPB insn, swaps only byte.  */
	      record_buf_mem[0] = 1;
	    }
	  record_buf_mem[1] = u_regval;
	  arm_insn_r->mem_rec_count = 1;
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRH.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRH);
	}
      else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* LDRD: loads the even/odd register pair Rd, Rd+1.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  record_buf[1] = record_buf[0] + 1;
	  arm_insn_r->reg_rec_count = 2;
	}
      else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
	{
	  /* STRD.  */
	  arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
			  ARM_RECORD_STRD);
	}
      else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
	{
	  /* LDRH, LDRSB, LDRSH: destination register only.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}

    }

  opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
  if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
      && !INSN_RECORDED(arm_insn_r))
    {
      ret = -1;
      /* Handle coprocessor insn extension space.  */
    }

  /* To be done for ARMv5 and later; as of now we return -1.  */
  if (-1 == ret)
    printf_unfiltered (_("Process record does not support instruction x%0x "
			 "at address %s.\n"),arm_insn_r->arm_insn,
			 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));


  /* Publish the collected records into the decode record.  */
  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);

  return ret;
}
11009
11010 /* Handling opcode 000 insns. */
11011
11012 static int
11013 arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
11014 {
11015 struct regcache *reg_cache = arm_insn_r->regcache;
11016 uint32_t record_buf[8], record_buf_mem[8];
11017 ULONGEST u_regval[2] = {0};
11018
11019 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11020 uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
11021 uint32_t opcode1 = 0;
11022
11023 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11024 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11025 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
11026
11027 /* Data processing insn /multiply insn. */
11028 if (9 == arm_insn_r->decode
11029 && ((4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11030 || (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)))
11031 {
11032 /* Handle multiply instructions. */
11033 /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL. */
11034 if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
11035 {
11036 /* Handle MLA and MUL. */
11037 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11038 record_buf[1] = ARM_PS_REGNUM;
11039 arm_insn_r->reg_rec_count = 2;
11040 }
11041 else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11042 {
11043 /* Handle SMLAL, SMULL, UMLAL, UMULL. */
11044 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11045 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11046 record_buf[2] = ARM_PS_REGNUM;
11047 arm_insn_r->reg_rec_count = 3;
11048 }
11049 }
11050 else if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11051 && (11 == arm_insn_r->decode || 13 == arm_insn_r->decode))
11052 {
11053 /* Handle misc load insns, as 20th bit (L = 1). */
11054 /* LDR insn has a capability to do branching, if
11055 MOV LR, PC is precceded by LDR insn having Rn as R15
11056 in that case, it emulates branch and link insn, and hence we
11057 need to save CSPR and PC as well. I am not sure this is right
11058 place; as opcode = 010 LDR insn make this happen, if R15 was
11059 used. */
11060 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11061 if (15 != reg_dest)
11062 {
11063 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11064 arm_insn_r->reg_rec_count = 1;
11065 }
11066 else
11067 {
11068 record_buf[0] = reg_dest;
11069 record_buf[1] = ARM_PS_REGNUM;
11070 arm_insn_r->reg_rec_count = 2;
11071 }
11072 }
11073 else if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11074 && sbo_sbz (arm_insn_r->arm_insn, 5, 12, 0)
11075 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11076 && 2 == bits (arm_insn_r->arm_insn, 20, 21))
11077 {
11078 /* Handle MSR insn. */
11079 if (9 == arm_insn_r->opcode)
11080 {
11081 /* CSPR is going to be changed. */
11082 record_buf[0] = ARM_PS_REGNUM;
11083 arm_insn_r->reg_rec_count = 1;
11084 }
11085 else
11086 {
11087 /* SPSR is going to be changed. */
11088 /* How to read SPSR value? */
11089 printf_unfiltered (_("Process record does not support instruction "
11090 "0x%0x at address %s.\n"),
11091 arm_insn_r->arm_insn,
11092 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11093 return -1;
11094 }
11095 }
11096 else if (9 == arm_insn_r->decode
11097 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11098 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11099 {
11100 /* Handling SWP, SWPB. */
11101 /* These insn, changes register and memory as well. */
11102 /* SWP or SWPB insn. */
11103
11104 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11105 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11106 /* SWP insn ?, swaps word. */
11107 if (8 == arm_insn_r->opcode)
11108 {
11109 record_buf_mem[0] = 4;
11110 }
11111 else
11112 {
11113 /* SWPB insn, swaps only byte. */
11114 record_buf_mem[0] = 1;
11115 }
11116 record_buf_mem[1] = u_regval[0];
11117 arm_insn_r->mem_rec_count = 1;
11118 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11119 arm_insn_r->reg_rec_count = 1;
11120 }
11121 else if (3 == arm_insn_r->decode && 0x12 == opcode1
11122 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11123 {
11124 /* Handle BLX, branch and link/exchange. */
11125 if (9 == arm_insn_r->opcode)
11126 {
11127 /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm,
11128 and R14 stores the return address. */
11129 record_buf[0] = ARM_PS_REGNUM;
11130 record_buf[1] = ARM_LR_REGNUM;
11131 arm_insn_r->reg_rec_count = 2;
11132 }
11133 }
11134 else if (7 == arm_insn_r->decode && 0x12 == opcode1)
11135 {
11136 /* Handle enhanced software breakpoint insn, BKPT. */
11137 /* CPSR is changed to be executed in ARM state, disabling normal
11138 interrupts, entering abort mode. */
11139 /* According to high vector configuration PC is set. */
11140 /* user hit breakpoint and type reverse, in
11141 that case, we need to go back with previous CPSR and
11142 Program Counter. */
11143 record_buf[0] = ARM_PS_REGNUM;
11144 record_buf[1] = ARM_LR_REGNUM;
11145 arm_insn_r->reg_rec_count = 2;
11146
11147 /* Save SPSR also; how? */
11148 printf_unfiltered (_("Process record does not support instruction "
11149 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11150 paddress (arm_insn_r->gdbarch,
11151 arm_insn_r->this_addr));
11152 return -1;
11153 }
11154 else if (11 == arm_insn_r->decode
11155 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11156 {
11157 /* Handle enhanced store insns and DSP insns (e.g. LDRD). */
11158
11159 /* Handle str(x) insn */
11160 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11161 ARM_RECORD_STRH);
11162 }
11163 else if (1 == arm_insn_r->decode && 0x12 == opcode1
11164 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11165 {
11166 /* Handle BX, branch and link/exchange. */
11167 /* Branch is chosen by setting T bit of CSPR, bitp[0] of Rm. */
11168 record_buf[0] = ARM_PS_REGNUM;
11169 arm_insn_r->reg_rec_count = 1;
11170 }
11171 else if (1 == arm_insn_r->decode && 0x16 == opcode1
11172 && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
11173 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
11174 {
11175 /* Count leading zeros: CLZ. */
11176 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11177 arm_insn_r->reg_rec_count = 1;
11178 }
11179 else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11180 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11181 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
11182 && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0)
11183 )
11184 {
11185 /* Handle MRS insn. */
11186 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11187 arm_insn_r->reg_rec_count = 1;
11188 }
11189 else if (arm_insn_r->opcode <= 15)
11190 {
11191 /* Normal data processing insns. */
11192 /* Out of 11 shifter operands mode, all the insn modifies destination
11193 register, which is specified by 13-16 decode. */
11194 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11195 record_buf[1] = ARM_PS_REGNUM;
11196 arm_insn_r->reg_rec_count = 2;
11197 }
11198 else
11199 {
11200 return -1;
11201 }
11202
11203 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11204 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11205 return 0;
11206 }
11207
11208 /* Handling opcode 001 insns. */
11209
11210 static int
11211 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
11212 {
11213 uint32_t record_buf[8], record_buf_mem[8];
11214
11215 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11216 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11217
11218 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11219 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
11220 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11221 )
11222 {
11223 /* Handle MSR insn. */
11224 if (9 == arm_insn_r->opcode)
11225 {
11226 /* CSPR is going to be changed. */
11227 record_buf[0] = ARM_PS_REGNUM;
11228 arm_insn_r->reg_rec_count = 1;
11229 }
11230 else
11231 {
11232 /* SPSR is going to be changed. */
11233 }
11234 }
11235 else if (arm_insn_r->opcode <= 15)
11236 {
11237 /* Normal data processing insns. */
11238 /* Out of 11 shifter operands mode, all the insn modifies destination
11239 register, which is specified by 13-16 decode. */
11240 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11241 record_buf[1] = ARM_PS_REGNUM;
11242 arm_insn_r->reg_rec_count = 2;
11243 }
11244 else
11245 {
11246 return -1;
11247 }
11248
11249 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11250 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11251 return 0;
11252 }
11253
11254 /* Handling opcode 010 insns. */
11255
static int
arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;

  uint32_t reg_src1 = 0 , reg_dest = 0;
  uint32_t offset_12 = 0, tgt_mem_addr = 0;
  uint32_t record_buf[8], record_buf_mem[8];

  ULONGEST u_regval = 0;

  /* opcode holds bits 21-24 (the P, U, B and W addressing-mode bits);
     decode holds bits 4-7.  */
  arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
  arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);

  /* The L bit (bit 20) distinguishes loads from stores.  */
  if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
      /* LDR insn has a capability to do branching, if
	 MOV LR, PC is preceded by LDR insn having Rn as R15
	 in that case, it emulates branch and link insn, and hence we
	 need to save CPSR and PC as well.  */
      if (ARM_PC_REGNUM != reg_dest)
	{
	  /* Ordinary load: only the destination register changes.  */
	  record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
	  arm_insn_r->reg_rec_count = 1;
	}
      else
	{
	  /* Load into the PC acts as a branch: save PC and CPSR.  */
	  record_buf[0] = reg_dest;
	  record_buf[1] = ARM_PS_REGNUM;
	  arm_insn_r->reg_rec_count = 2;
	}
    }
  else
    {
      /* Store, immediate offset, immediate pre-indexed,
	 immediate post-indexed.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      /* U == 1: the 12-bit offset is added to the base register.  */
      if (bit (arm_insn_r->arm_insn, 23))
	{
	  tgt_mem_addr = u_regval + offset_12;
	}
      else
	{
	  tgt_mem_addr = u_regval - offset_12;
	}

      /* The B bit (bit 1 of opcode) selects byte vs. word stores.  */
      switch (arm_insn_r->opcode)
	{
	/* STR.  */
	case 8:
	case 12:
	/* STR.  */
	case 9:
	case 13:
	/* STRT.  */
	case 1:
	case 5:
	/* STR.  */
	case 4:
	case 0:
	  record_buf_mem[0] = 4;
	  break;

	/* STRB.  */
	case 10:
	case 14:
	/* STRB.  */
	case 11:
	case 15:
	/* STRBT.  */
	case 3:
	case 7:
	/* STRB.  */
	case 2:
	case 6:
	  record_buf_mem[0] = 1;
	  break;

	default:
	  gdb_assert_not_reached ("no decoding pattern found");
	  break;
	}
      record_buf_mem[1] = tgt_mem_addr;
      arm_insn_r->mem_rec_count = 1;

      /* Post-indexed (P == 0: opcodes 0-7) and pre-indexed with
	 writeback (W == 1: odd opcodes) also modify Rn.  */
      if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
	  || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
	  || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
	  || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
	  || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
	  || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
	 )
	{
	  /* We are handling pre-indexed mode; post-indexed mode;
	     where Rn is going to be changed.  */
	  record_buf[0] = reg_src1;
	  arm_insn_r->reg_rec_count = 1;
	}
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11364
11365 /* Handling opcode 011 insns. */
11366
11367 static int
11368 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
11369 {
11370 struct regcache *reg_cache = arm_insn_r->regcache;
11371
11372 uint32_t shift_imm = 0;
11373 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11374 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11375 uint32_t record_buf[8], record_buf_mem[8];
11376
11377 LONGEST s_word;
11378 ULONGEST u_regval[2];
11379
11380 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11381 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11382
11383 /* Handle enhanced store insns and LDRD DSP insn,
11384 order begins according to addressing modes for store insns
11385 STRH insn. */
11386
11387 /* LDR or STR? */
11388 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11389 {
11390 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11391 /* LDR insn has a capability to do branching, if
11392 MOV LR, PC is precedded by LDR insn having Rn as R15
11393 in that case, it emulates branch and link insn, and hence we
11394 need to save CSPR and PC as well. */
11395 if (15 != reg_dest)
11396 {
11397 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11398 arm_insn_r->reg_rec_count = 1;
11399 }
11400 else
11401 {
11402 record_buf[0] = reg_dest;
11403 record_buf[1] = ARM_PS_REGNUM;
11404 arm_insn_r->reg_rec_count = 2;
11405 }
11406 }
11407 else
11408 {
11409 if (! bits (arm_insn_r->arm_insn, 4, 11))
11410 {
11411 /* Store insn, register offset and register pre-indexed,
11412 register post-indexed. */
11413 /* Get Rm. */
11414 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11415 /* Get Rn. */
11416 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11417 regcache_raw_read_unsigned (reg_cache, reg_src1
11418 , &u_regval[0]);
11419 regcache_raw_read_unsigned (reg_cache, reg_src2
11420 , &u_regval[1]);
11421 if (15 == reg_src2)
11422 {
11423 /* If R15 was used as Rn, hence current PC+8. */
11424 /* Pre-indexed mode doesnt reach here ; illegal insn. */
11425 u_regval[0] = u_regval[0] + 8;
11426 }
11427 /* Calculate target store address, Rn +/- Rm, register offset. */
11428 /* U == 1. */
11429 if (bit (arm_insn_r->arm_insn, 23))
11430 {
11431 tgt_mem_addr = u_regval[0] + u_regval[1];
11432 }
11433 else
11434 {
11435 tgt_mem_addr = u_regval[1] - u_regval[0];
11436 }
11437
11438 switch (arm_insn_r->opcode)
11439 {
11440 /* STR. */
11441 case 8:
11442 case 12:
11443 /* STR. */
11444 case 9:
11445 case 13:
11446 /* STRT. */
11447 case 1:
11448 case 5:
11449 /* STR. */
11450 case 0:
11451 case 4:
11452 record_buf_mem[0] = 4;
11453 break;
11454
11455 /* STRB. */
11456 case 10:
11457 case 14:
11458 /* STRB. */
11459 case 11:
11460 case 15:
11461 /* STRBT. */
11462 case 3:
11463 case 7:
11464 /* STRB. */
11465 case 2:
11466 case 6:
11467 record_buf_mem[0] = 1;
11468 break;
11469
11470 default:
11471 gdb_assert_not_reached ("no decoding pattern found");
11472 break;
11473 }
11474 record_buf_mem[1] = tgt_mem_addr;
11475 arm_insn_r->mem_rec_count = 1;
11476
11477 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11478 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11479 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11480 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11481 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11482 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11483 )
11484 {
11485 /* Rn is going to be changed in pre-indexed mode and
11486 post-indexed mode as well. */
11487 record_buf[0] = reg_src2;
11488 arm_insn_r->reg_rec_count = 1;
11489 }
11490 }
11491 else
11492 {
11493 /* Store insn, scaled register offset; scaled pre-indexed. */
11494 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
11495 /* Get Rm. */
11496 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11497 /* Get Rn. */
11498 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11499 /* Get shift_imm. */
11500 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
11501 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11502 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
11503 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11504 /* Offset_12 used as shift. */
11505 switch (offset_12)
11506 {
11507 case 0:
11508 /* Offset_12 used as index. */
11509 offset_12 = u_regval[0] << shift_imm;
11510 break;
11511
11512 case 1:
11513 offset_12 = (!shift_imm)?0:u_regval[0] >> shift_imm;
11514 break;
11515
11516 case 2:
11517 if (!shift_imm)
11518 {
11519 if (bit (u_regval[0], 31))
11520 {
11521 offset_12 = 0xFFFFFFFF;
11522 }
11523 else
11524 {
11525 offset_12 = 0;
11526 }
11527 }
11528 else
11529 {
11530 /* This is arithmetic shift. */
11531 offset_12 = s_word >> shift_imm;
11532 }
11533 break;
11534
11535 case 3:
11536 if (!shift_imm)
11537 {
11538 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
11539 &u_regval[1]);
11540 /* Get C flag value and shift it by 31. */
11541 offset_12 = (((bit (u_regval[1], 29)) << 31) \
11542 | (u_regval[0]) >> 1);
11543 }
11544 else
11545 {
11546 offset_12 = (u_regval[0] >> shift_imm) \
11547 | (u_regval[0] <<
11548 (sizeof(uint32_t) - shift_imm));
11549 }
11550 break;
11551
11552 default:
11553 gdb_assert_not_reached ("no decoding pattern found");
11554 break;
11555 }
11556
11557 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11558 /* bit U set. */
11559 if (bit (arm_insn_r->arm_insn, 23))
11560 {
11561 tgt_mem_addr = u_regval[1] + offset_12;
11562 }
11563 else
11564 {
11565 tgt_mem_addr = u_regval[1] - offset_12;
11566 }
11567
11568 switch (arm_insn_r->opcode)
11569 {
11570 /* STR. */
11571 case 8:
11572 case 12:
11573 /* STR. */
11574 case 9:
11575 case 13:
11576 /* STRT. */
11577 case 1:
11578 case 5:
11579 /* STR. */
11580 case 0:
11581 case 4:
11582 record_buf_mem[0] = 4;
11583 break;
11584
11585 /* STRB. */
11586 case 10:
11587 case 14:
11588 /* STRB. */
11589 case 11:
11590 case 15:
11591 /* STRBT. */
11592 case 3:
11593 case 7:
11594 /* STRB. */
11595 case 2:
11596 case 6:
11597 record_buf_mem[0] = 1;
11598 break;
11599
11600 default:
11601 gdb_assert_not_reached ("no decoding pattern found");
11602 break;
11603 }
11604 record_buf_mem[1] = tgt_mem_addr;
11605 arm_insn_r->mem_rec_count = 1;
11606
11607 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11608 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11609 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11610 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11611 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11612 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11613 )
11614 {
11615 /* Rn is going to be changed in register scaled pre-indexed
11616 mode,and scaled post indexed mode. */
11617 record_buf[0] = reg_src2;
11618 arm_insn_r->reg_rec_count = 1;
11619 }
11620 }
11621 }
11622
11623 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11624 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11625 return 0;
11626 }
11627
11628 /* Handling opcode 100 insns. */
11629
static int
arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
{
  struct regcache *reg_cache = arm_insn_r->regcache;

  uint32_t register_list[16] = {0}, register_count = 0, register_bits = 0;
  uint32_t reg_src1 = 0, addr_mode = 0, no_of_regs = 0;
  uint32_t start_address = 0, index = 0;
  uint32_t record_buf[24], record_buf_mem[48];

  ULONGEST u_regval[2] = {0};

  /* This mode is exclusively for load and store multiple.  */
  /* Handle increment after/before and decrement after/before modes;
     Rn changes depending on the W bit, but as of now we record Rn
     unconditionally, without optimization.  */

  /* The L bit (bit 20) distinguishes LDM from STM.  */
  if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
    {
      /* LDM (1,2,3) where LDM (3) changes CPSR too.  */

      /* NOTE(review): bit 20 is the L bit and is already known to be
	 set on this path, so the first operand of this && is always
	 true; bit 22 is the S bit, selecting the user-register /
	 CPSR-restoring forms -- confirm against the INSN_S_L_BIT_NUM
	 definition.  */
      if (bit (arm_insn_r->arm_insn, 20) && !bit (arm_insn_r->arm_insn, 22))
	{
	  register_bits = bits (arm_insn_r->arm_insn, 0, 15);
	  no_of_regs = 15;
	}
      else
	{
	  register_bits = bits (arm_insn_r->arm_insn, 0, 14);
	  no_of_regs = 14;
	}
      /* Get Rn.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      /* Mark which registers appear in the list.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_list[register_count++] = 1;
	  register_bits = register_bits >> 1;
	}

      /* Extra space for Base Register and CPSR; without optimization.  */
      record_buf[register_count] = reg_src1;
      record_buf[register_count + 1] = ARM_PS_REGNUM;
      arm_insn_r->reg_rec_count = register_count + 2;

      /* Fill slots 0..register_count-1 with the listed registers'
	 numbers; the loop index doubles as the register number.  */
      for (register_count = 0; register_count < no_of_regs; register_count++)
	{
	  if (register_list[register_count])
	    {
	      /* Register_count gives total no of registers
		 and dually working as reg number.  */
	      record_buf[index] = register_count;
	      index++;
	    }
	}

    }
  else
    {
      /* It handles both STM(1) and STM(2).  */
      /* addr_mode holds the U (bit 23) and P (bit 24) bits.  */
      addr_mode = bits (arm_insn_r->arm_insn, 23, 24);

      register_bits = bits (arm_insn_r->arm_insn, 0, 15);
      /* Get Rn.  */
      reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
      /* Count the registers to be stored.  */
      while (register_bits)
	{
	  if (register_bits & 0x00000001)
	    register_count++;
	  register_bits = register_bits >> 1;
	}

      /* Each stored word is recorded as a (length, address) pair in
	 record_buf_mem, lowest address first.  */
      switch (addr_mode)
	{
	/* Decrement after.  */
	case 0:
	  start_address = (u_regval[0]) - (register_count * 4) + 4;
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	/* Increment after.  */
	case 1:
	  start_address = u_regval[0];
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	/* Decrement before.  */
	case 2:

	  start_address = (u_regval[0]) - (register_count * 4);
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	/* Increment before.  */
	case 3:
	  start_address = u_regval[0] + 4;
	  arm_insn_r->mem_rec_count = register_count;
	  while (register_count)
	    {
	      record_buf_mem[(register_count * 2) - 1] = start_address;
	      record_buf_mem[(register_count * 2) - 2] = 4;
	      start_address = start_address + 4;
	      register_count--;
	    }
	  break;

	default:
	  gdb_assert_not_reached ("no decoding pattern found");
	  break;
	}

      /* Base register also changes; based on condition and W bit.  */
      /* We save it anyway without optimization.  */
      record_buf[0] = reg_src1;
      arm_insn_r->reg_rec_count = 1;
    }

  REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
  return 0;
}
11773
11774 /* Handling opcode 101 insns. */
11775
11776 static int
11777 arm_record_b_bl (insn_decode_record *arm_insn_r)
11778 {
11779 uint32_t record_buf[8];
11780
11781 /* Handle B, BL, BLX(1) insns. */
11782 /* B simply branches so we do nothing here. */
11783 /* Note: BLX(1) doesnt fall here but instead it falls into
11784 extension space. */
11785 if (bit (arm_insn_r->arm_insn, 24))
11786 {
11787 record_buf[0] = ARM_LR_REGNUM;
11788 arm_insn_r->reg_rec_count = 1;
11789 }
11790
11791 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11792
11793 return 0;
11794 }
11795
11796 /* Handling opcode 110 insns. */
11797
static int
arm_record_coproc (insn_decode_record *arm_insn_r)
{
  /* Coprocessor (opcode 110) insns are not supported by process
     record: report the insn and return failure so recording stops.  */
  printf_unfiltered (_("Process record does not support instruction "
                       "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
                     paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));

  return -1;
}
11807
11808 /* Handling opcode 111 insns. */
11809
11810 static int
11811 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11812 {
11813 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
11814 struct regcache *reg_cache = arm_insn_r->regcache;
11815 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
11816
11817 /* Handle SWI insn; system call would be handled over here. */
11818
11819 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
11820 if (15 == arm_insn_r->opcode)
11821 {
11822 /* Handle arm syscall insn. */
11823 if (tdep->arm_swi_record != NULL)
11824 {
11825 ret = tdep->arm_swi_record(reg_cache);
11826 }
11827 else
11828 {
11829 printf_unfiltered (_("no syscall record support\n"));
11830 ret = -1;
11831 }
11832 }
11833
11834 printf_unfiltered (_("Process record does not support instruction "
11835 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11836 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11837 return ret;
11838 }
11839
11840 /* Handling opcode 000 insns. */
11841
11842 static int
11843 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
11844 {
11845 uint32_t record_buf[8];
11846 uint32_t reg_src1 = 0;
11847
11848 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11849
11850 record_buf[0] = ARM_PS_REGNUM;
11851 record_buf[1] = reg_src1;
11852 thumb_insn_r->reg_rec_count = 2;
11853
11854 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11855
11856 return 0;
11857 }
11858
11859
11860 /* Handling opcode 001 insns. */
11861
11862 static int
11863 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
11864 {
11865 uint32_t record_buf[8];
11866 uint32_t reg_src1 = 0;
11867
11868 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11869
11870 record_buf[0] = ARM_PS_REGNUM;
11871 record_buf[1] = reg_src1;
11872 thumb_insn_r->reg_rec_count = 2;
11873
11874 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11875
11876 return 0;
11877 }
11878
11879 /* Handling opcode 010 insns. */
11880
11881 static int
11882 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
11883 {
11884 struct regcache *reg_cache = thumb_insn_r->regcache;
11885 uint32_t record_buf[8], record_buf_mem[8];
11886
11887 uint32_t reg_src1 = 0, reg_src2 = 0;
11888 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
11889
11890 ULONGEST u_regval[2] = {0};
11891
11892 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
11893
11894 if (bit (thumb_insn_r->arm_insn, 12))
11895 {
11896 /* Handle load/store register offset. */
11897 opcode2 = bits (thumb_insn_r->arm_insn, 9, 10);
11898 if (opcode2 >= 12 && opcode2 <= 15)
11899 {
11900 /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
11901 reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
11902 record_buf[0] = reg_src1;
11903 thumb_insn_r->reg_rec_count = 1;
11904 }
11905 else if (opcode2 >= 8 && opcode2 <= 10)
11906 {
11907 /* STR(2), STRB(2), STRH(2) . */
11908 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
11909 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
11910 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11911 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11912 if (8 == opcode2)
11913 record_buf_mem[0] = 4; /* STR (2). */
11914 else if (10 == opcode2)
11915 record_buf_mem[0] = 1; /* STRB (2). */
11916 else if (9 == opcode2)
11917 record_buf_mem[0] = 2; /* STRH (2). */
11918 record_buf_mem[1] = u_regval[0] + u_regval[1];
11919 thumb_insn_r->mem_rec_count = 1;
11920 }
11921 }
11922 else if (bit (thumb_insn_r->arm_insn, 11))
11923 {
11924 /* Handle load from literal pool. */
11925 /* LDR(3). */
11926 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11927 record_buf[0] = reg_src1;
11928 thumb_insn_r->reg_rec_count = 1;
11929 }
11930 else if (opcode1)
11931 {
11932 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
11933 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
11934 if ((3 == opcode2) && (!opcode3))
11935 {
11936 /* Branch with exchange. */
11937 record_buf[0] = ARM_PS_REGNUM;
11938 thumb_insn_r->reg_rec_count = 1;
11939 }
11940 else
11941 {
11942 /* Format 8; special data processing insns. */
11943 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11944 record_buf[0] = ARM_PS_REGNUM;
11945 record_buf[1] = reg_src1;
11946 thumb_insn_r->reg_rec_count = 2;
11947 }
11948 }
11949 else
11950 {
11951 /* Format 5; data processing insns. */
11952 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11953 if (bit (thumb_insn_r->arm_insn, 7))
11954 {
11955 reg_src1 = reg_src1 + 8;
11956 }
11957 record_buf[0] = ARM_PS_REGNUM;
11958 record_buf[1] = reg_src1;
11959 thumb_insn_r->reg_rec_count = 2;
11960 }
11961
11962 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11963 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
11964 record_buf_mem);
11965
11966 return 0;
11967 }
11968
11969 /* Handling opcode 001 insns. */
11970
static int
thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
{
  struct regcache *reg_cache = thumb_insn_r->regcache;
  uint32_t record_buf[8], record_buf_mem[8];

  uint32_t reg_src1 = 0;
  uint32_t opcode = 0, immed_5 = 0;

  ULONGEST u_regval = 0;

  /* bits 11-12: bit 11 is the L bit, bit 12 the B (byte) bit.  */
  opcode = bits (thumb_insn_r->arm_insn, 11, 12);

  /* NOTE(review): any nonzero opcode is treated as a load here, which
     also covers opcode == 2 (B set, L clear, i.e. a byte STORE); and
     the store path below always records a 4-byte word scaled by 4.
     This is only correct if the dispatcher routes just the word forms
     here -- confirm against the caller.  */
  if (opcode)
    {
      /* LDR(1): destination Rd (bits 0-2) is overwritten.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
      record_buf[0] = reg_src1;
      thumb_insn_r->reg_rec_count = 1;
    }
  else
    {
      /* STR(1): one word at Rn + immed_5 * 4 changes.  */
      reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
      immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
      regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
      record_buf_mem[0] = 4;
      record_buf_mem[1] = u_regval + (immed_5 * 4);
      thumb_insn_r->mem_rec_count = 1;
    }

  REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
  MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
             record_buf_mem);

  return 0;
}
12008
12009 /* Handling opcode 100 insns. */
12010
12011 static int
12012 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
12013 {
12014 struct regcache *reg_cache = thumb_insn_r->regcache;
12015 uint32_t record_buf[8], record_buf_mem[8];
12016
12017 uint32_t reg_src1 = 0;
12018 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
12019
12020 ULONGEST u_regval = 0;
12021
12022 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12023
12024 if (3 == opcode)
12025 {
12026 /* LDR(4). */
12027 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12028 record_buf[0] = reg_src1;
12029 thumb_insn_r->reg_rec_count = 1;
12030 }
12031 else if (1 == opcode)
12032 {
12033 /* LDRH(1). */
12034 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12035 record_buf[0] = reg_src1;
12036 thumb_insn_r->reg_rec_count = 1;
12037 }
12038 else if (2 == opcode)
12039 {
12040 /* STR(3). */
12041 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12042 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12043 record_buf_mem[0] = 4;
12044 record_buf_mem[1] = u_regval + (immed_8 * 4);
12045 thumb_insn_r->mem_rec_count = 1;
12046 }
12047 else if (0 == opcode)
12048 {
12049 /* STRH(1). */
12050 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12051 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12052 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12053 record_buf_mem[0] = 2;
12054 record_buf_mem[1] = u_regval + (immed_5 * 2);
12055 thumb_insn_r->mem_rec_count = 1;
12056 }
12057
12058 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12059 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12060 record_buf_mem);
12061
12062 return 0;
12063 }
12064
12065 /* Handling opcode 101 insns. */
12066
12067 static int
12068 thumb_record_misc (insn_decode_record *thumb_insn_r)
12069 {
12070 struct regcache *reg_cache = thumb_insn_r->regcache;
12071
12072 uint32_t opcode = 0, opcode1 = 0, opcode2 = 0;
12073 uint32_t register_bits = 0, register_count = 0;
12074 uint32_t register_list[8] = {0}, index = 0, start_address = 0;
12075 uint32_t record_buf[24], record_buf_mem[48];
12076 uint32_t reg_src1;
12077
12078 ULONGEST u_regval = 0;
12079
12080 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12081 opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
12082 opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);
12083
12084 if (14 == opcode2)
12085 {
12086 /* POP. */
12087 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12088 while (register_bits)
12089 {
12090 if (register_bits & 0x00000001)
12091 register_list[register_count++] = 1;
12092 register_bits = register_bits >> 1;
12093 }
12094 record_buf[register_count] = ARM_PS_REGNUM;
12095 record_buf[register_count + 1] = ARM_SP_REGNUM;
12096 thumb_insn_r->reg_rec_count = register_count + 2;
12097 for (register_count = 0; register_count < 8; register_count++)
12098 {
12099 if (register_list[register_count])
12100 {
12101 record_buf[index] = register_count;
12102 index++;
12103 }
12104 }
12105 }
12106 else if (10 == opcode2)
12107 {
12108 /* PUSH. */
12109 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12110 regcache_raw_read_unsigned (reg_cache, ARM_PC_REGNUM, &u_regval);
12111 while (register_bits)
12112 {
12113 if (register_bits & 0x00000001)
12114 register_count++;
12115 register_bits = register_bits >> 1;
12116 }
12117 start_address = u_regval - \
12118 (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
12119 thumb_insn_r->mem_rec_count = register_count;
12120 while (register_count)
12121 {
12122 record_buf_mem[(register_count * 2) - 1] = start_address;
12123 record_buf_mem[(register_count * 2) - 2] = 4;
12124 start_address = start_address + 4;
12125 register_count--;
12126 }
12127 record_buf[0] = ARM_SP_REGNUM;
12128 thumb_insn_r->reg_rec_count = 1;
12129 }
12130 else if (0x1E == opcode1)
12131 {
12132 /* BKPT insn. */
12133 /* Handle enhanced software breakpoint insn, BKPT. */
12134 /* CPSR is changed to be executed in ARM state, disabling normal
12135 interrupts, entering abort mode. */
12136 /* According to high vector configuration PC is set. */
12137 /* User hits breakpoint and type reverse, in that case, we need to go back with
12138 previous CPSR and Program Counter. */
12139 record_buf[0] = ARM_PS_REGNUM;
12140 record_buf[1] = ARM_LR_REGNUM;
12141 thumb_insn_r->reg_rec_count = 2;
12142 /* We need to save SPSR value, which is not yet done. */
12143 printf_unfiltered (_("Process record does not support instruction "
12144 "0x%0x at address %s.\n"),
12145 thumb_insn_r->arm_insn,
12146 paddress (thumb_insn_r->gdbarch,
12147 thumb_insn_r->this_addr));
12148 return -1;
12149 }
12150 else if ((0 == opcode) || (1 == opcode))
12151 {
12152 /* ADD(5), ADD(6). */
12153 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12154 record_buf[0] = reg_src1;
12155 thumb_insn_r->reg_rec_count = 1;
12156 }
12157 else if (2 == opcode)
12158 {
12159 /* ADD(7), SUB(4). */
12160 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12161 record_buf[0] = ARM_SP_REGNUM;
12162 thumb_insn_r->reg_rec_count = 1;
12163 }
12164
12165 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12166 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12167 record_buf_mem);
12168
12169 return 0;
12170 }
12171
12172 /* Handling opcode 110 insns. */
12173
12174 static int
12175 thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
12176 {
12177 struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
12178 struct regcache *reg_cache = thumb_insn_r->regcache;
12179
12180 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
12181 uint32_t reg_src1 = 0;
12182 uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
12183 uint32_t register_list[8] = {0}, index = 0, start_address = 0;
12184 uint32_t record_buf[24], record_buf_mem[48];
12185
12186 ULONGEST u_regval = 0;
12187
12188 opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
12189 opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);
12190
12191 if (1 == opcode2)
12192 {
12193
12194 /* LDMIA. */
12195 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12196 /* Get Rn. */
12197 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12198 while (register_bits)
12199 {
12200 if (register_bits & 0x00000001)
12201 register_list[register_count++] = 1;
12202 register_bits = register_bits >> 1;
12203 }
12204 record_buf[register_count] = reg_src1;
12205 thumb_insn_r->reg_rec_count = register_count + 1;
12206 for (register_count = 0; register_count < 8; register_count++)
12207 {
12208 if (register_list[register_count])
12209 {
12210 record_buf[index] = register_count;
12211 index++;
12212 }
12213 }
12214 }
12215 else if (0 == opcode2)
12216 {
12217 /* It handles both STMIA. */
12218 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12219 /* Get Rn. */
12220 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12221 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12222 while (register_bits)
12223 {
12224 if (register_bits & 0x00000001)
12225 register_count++;
12226 register_bits = register_bits >> 1;
12227 }
12228 start_address = u_regval;
12229 thumb_insn_r->mem_rec_count = register_count;
12230 while (register_count)
12231 {
12232 record_buf_mem[(register_count * 2) - 1] = start_address;
12233 record_buf_mem[(register_count * 2) - 2] = 4;
12234 start_address = start_address + 4;
12235 register_count--;
12236 }
12237 }
12238 else if (0x1F == opcode1)
12239 {
12240 /* Handle arm syscall insn. */
12241 if (tdep->arm_swi_record != NULL)
12242 {
12243 ret = tdep->arm_swi_record(reg_cache);
12244 }
12245 else
12246 {
12247 printf_unfiltered (_("no syscall record support\n"));
12248 return -1;
12249 }
12250 }
12251
12252 /* B (1), conditional branch is automatically taken care in process_record,
12253 as PC is saved there. */
12254
12255 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12256 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12257 record_buf_mem);
12258
12259 return ret;
12260 }
12261
12262 /* Handling opcode 111 insns. */
12263
12264 static int
12265 thumb_record_branch (insn_decode_record *thumb_insn_r)
12266 {
12267 uint32_t record_buf[8];
12268 uint32_t bits_h = 0;
12269
12270 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12271
12272 if (2 == bits_h || 3 == bits_h)
12273 {
12274 /* BL */
12275 record_buf[0] = ARM_LR_REGNUM;
12276 thumb_insn_r->reg_rec_count = 1;
12277 }
12278 else if (1 == bits_h)
12279 {
12280 /* BLX(1). */
12281 record_buf[0] = ARM_PS_REGNUM;
12282 record_buf[1] = ARM_LR_REGNUM;
12283 thumb_insn_r->reg_rec_count = 2;
12284 }
12285
12286 /* B(2) is automatically taken care in process_record, as PC is
12287 saved there. */
12288
12289 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12290
12291 return 0;
12292 }
12293
12294
/* Extract an arm/thumb/thumb2 insn depending on its size; return 0 on
   success and a positive value on failure.  */
12297
12298 static int
12299 extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size)
12300 {
12301 gdb_byte buf[insn_size];
12302
12303 memset (&buf[0], 0, insn_size);
12304
12305 if (target_read_memory (insn_record->this_addr, &buf[0], insn_size))
12306 return 1;
12307 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
12308 insn_size,
12309 gdbarch_byte_order (insn_record->gdbarch));
12310 return 0;
12311 }
12312
12313 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
12314
/* Decode an arm/thumb insn depending on its condition codes and opcodes,
   and dispatch it to the matching record handler.  */
12317
12318 static int
12319 decode_insn (insn_decode_record *arm_record, record_type_t record_type,
12320 uint32_t insn_size)
12321 {
12322
12323 /* (Starting from numerical 0); bits 25, 26, 27 decodes type of arm instruction. */
12324 static const sti_arm_hdl_fp_t const arm_handle_insn[8] =
12325 {
12326 arm_record_data_proc_misc_ld_str, /* 000. */
12327 arm_record_data_proc_imm, /* 001. */
12328 arm_record_ld_st_imm_offset, /* 010. */
12329 arm_record_ld_st_reg_offset, /* 011. */
12330 arm_record_ld_st_multiple, /* 100. */
12331 arm_record_b_bl, /* 101. */
12332 arm_record_coproc, /* 110. */
12333 arm_record_coproc_data_proc /* 111. */
12334 };
12335
12336 /* (Starting from numerical 0); bits 13,14,15 decodes type of thumb instruction. */
12337 static const sti_arm_hdl_fp_t const thumb_handle_insn[8] =
12338 { \
12339 thumb_record_shift_add_sub, /* 000. */
12340 thumb_record_add_sub_cmp_mov, /* 001. */
12341 thumb_record_ld_st_reg_offset, /* 010. */
12342 thumb_record_ld_st_imm_offset, /* 011. */
12343 thumb_record_ld_st_stack, /* 100. */
12344 thumb_record_misc, /* 101. */
12345 thumb_record_ldm_stm_swi, /* 110. */
12346 thumb_record_branch /* 111. */
12347 };
12348
12349 uint32_t ret = 0; /* return value: negative:failure 0:success. */
12350 uint32_t insn_id = 0;
12351
12352 if (extract_arm_insn (arm_record, insn_size))
12353 {
12354 if (record_debug)
12355 {
12356 printf_unfiltered (_("Process record: error reading memory at "
12357 "addr %s len = %d.\n"),
12358 paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
12359 }
12360 return -1;
12361 }
12362 else if (ARM_RECORD == record_type)
12363 {
12364 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
12365 insn_id = bits (arm_record->arm_insn, 25, 27);
12366 ret = arm_record_extension_space (arm_record);
12367 /* If this insn has fallen into extension space
12368 then we need not decode it anymore. */
12369 if (ret != -1 && !INSN_RECORDED(arm_record))
12370 {
12371 ret = arm_handle_insn[insn_id] (arm_record);
12372 }
12373 }
12374 else if (THUMB_RECORD == record_type)
12375 {
12376 /* As thumb does not have condition codes, we set negative. */
12377 arm_record->cond = -1;
12378 insn_id = bits (arm_record->arm_insn, 13, 15);
12379 ret = thumb_handle_insn[insn_id] (arm_record);
12380 }
12381 else if (THUMB2_RECORD == record_type)
12382 {
12383 printf_unfiltered (_("Process record doesnt support thumb32 instruction "
12384 "0x%0x at address %s.\n"),arm_record->arm_insn,
12385 paddress (arm_record->gdbarch,
12386 arm_record->this_addr));
12387 ret = -1;
12388 }
12389 else
12390 {
12391 /* Throw assertion. */
12392 gdb_assert_not_reached ("not a valid instruction, could not decode");
12393 }
12394
12395 return ret;
12396 }
12397
12398
12399 /* Cleans up local record registers and memory allocations. */
12400
12401 static void
12402 deallocate_reg_mem (insn_decode_record *record)
12403 {
12404 xfree (record->arm_regs);
12405 xfree (record->arm_mems);
12406 }
12407
12408
12409 /* Parse the current instruction and record the values of the registers and
12410 memory that will be changed in current instruction to record_arch_list".
12411 Return -1 if something is wrong. */
12412
12413 int
12414 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
12415 CORE_ADDR insn_addr)
12416 {
12417
12418 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
12419 uint32_t no_of_rec = 0;
12420 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
12421 ULONGEST t_bit = 0, insn_id = 0;
12422
12423 ULONGEST u_regval = 0;
12424
12425 insn_decode_record arm_record;
12426
12427 memset (&arm_record, 0, sizeof (insn_decode_record));
12428 arm_record.regcache = regcache;
12429 arm_record.this_addr = insn_addr;
12430 arm_record.gdbarch = gdbarch;
12431
12432
12433 if (record_debug > 1)
12434 {
12435 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
12436 "addr = %s\n",
12437 paddress (gdbarch, arm_record.this_addr));
12438 }
12439
12440 if (extract_arm_insn (&arm_record, 2))
12441 {
12442 if (record_debug)
12443 {
12444 printf_unfiltered (_("Process record: error reading memory at "
12445 "addr %s len = %d.\n"),
12446 paddress (arm_record.gdbarch,
12447 arm_record.this_addr), 2);
12448 }
12449 return -1;
12450 }
12451
12452 /* Check the insn, whether it is thumb or arm one. */
12453
12454 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
12455 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
12456
12457
12458 if (!(u_regval & t_bit))
12459 {
12460 /* We are decoding arm insn. */
12461 ret = decode_insn (&arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
12462 }
12463 else
12464 {
12465 insn_id = bits (arm_record.arm_insn, 11, 15);
12466 /* is it thumb2 insn? */
12467 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
12468 {
12469 ret = decode_insn (&arm_record, THUMB2_RECORD,
12470 THUMB2_INSN_SIZE_BYTES);
12471 }
12472 else
12473 {
12474 /* We are decoding thumb insn. */
12475 ret = decode_insn (&arm_record, THUMB_RECORD, THUMB_INSN_SIZE_BYTES);
12476 }
12477 }
12478
12479 if (0 == ret)
12480 {
12481 /* Record registers. */
12482 record_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
12483 if (arm_record.arm_regs)
12484 {
12485 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
12486 {
12487 if (record_arch_list_add_reg (arm_record.regcache ,
12488 arm_record.arm_regs[no_of_rec]))
12489 ret = -1;
12490 }
12491 }
12492 /* Record memories. */
12493 if (arm_record.arm_mems)
12494 {
12495 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
12496 {
12497 if (record_arch_list_add_mem
12498 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
12499 arm_record.arm_mems[no_of_rec].len))
12500 ret = -1;
12501 }
12502 }
12503
12504 if (record_arch_list_add_end ())
12505 ret = -1;
12506 }
12507
12508
12509 deallocate_reg_mem (&arm_record);
12510
12511 return ret;
12512 }
12513
This page took 0.34808 seconds and 4 git commands to generate.