1 /* Common target dependent code for GDB on ARM systems.
2
3 Copyright (C) 1988-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include <ctype.h> /* XXX for isupper (). */
23
24 #include "frame.h"
25 #include "inferior.h"
26 #include "gdbcmd.h"
27 #include "gdbcore.h"
28 #include "gdb_string.h"
29 #include "dis-asm.h" /* For register styles. */
30 #include "regcache.h"
31 #include "reggroups.h"
32 #include "doublest.h"
33 #include "value.h"
34 #include "arch-utils.h"
35 #include "osabi.h"
36 #include "frame-unwind.h"
37 #include "frame-base.h"
38 #include "trad-frame.h"
39 #include "objfiles.h"
40 #include "dwarf2-frame.h"
41 #include "gdbtypes.h"
42 #include "prologue-value.h"
43 #include "remote.h"
44 #include "target-descriptions.h"
45 #include "user-regs.h"
46 #include "observer.h"
47
48 #include "arm-tdep.h"
49 #include "gdb/sim-arm.h"
50
51 #include "elf-bfd.h"
52 #include "coff/internal.h"
53 #include "elf/arm.h"
54
55 #include "gdb_assert.h"
56 #include "vec.h"
57
58 #include "record.h"
59 #include "record-full.h"
60
61 #include "features/arm-with-m.c"
62 #include "features/arm-with-m-fpa-layout.c"
63 #include "features/arm-with-m-vfp-d16.c"
64 #include "features/arm-with-iwmmxt.c"
65 #include "features/arm-with-vfpv2.c"
66 #include "features/arm-with-vfpv3.c"
67 #include "features/arm-with-neon.c"
68
69 static int arm_debug;
70
  71 /* Macros for setting and testing a bit in a minimal symbol that marks
  72    it as a Thumb function.  The MSYMBOL_TARGET_FLAG_1 bit of the minimal
  73    symbol is used for this purpose.
74
75 MSYMBOL_SET_SPECIAL Actually sets the "special" bit.
76 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */
77
78 #define MSYMBOL_SET_SPECIAL(msym) \
79 MSYMBOL_TARGET_FLAG_1 (msym) = 1
80
81 #define MSYMBOL_IS_SPECIAL(msym) \
82 MSYMBOL_TARGET_FLAG_1 (msym)
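
/* Illustrative note (the setter's caller lives elsewhere in this file and is
   not shown here): the "special" bit is set when Thumb function symbols are
   read in, and arm_pc_is_thumb below falls back to MSYMBOL_IS_SPECIAL when no
   mapping symbols are available for an address.  */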
83
84 /* Per-objfile data used for mapping symbols. */
85 static const struct objfile_data *arm_objfile_data_key;
86
87 struct arm_mapping_symbol
88 {
89 bfd_vma value;
90 char type;
91 };
92 typedef struct arm_mapping_symbol arm_mapping_symbol_s;
93 DEF_VEC_O(arm_mapping_symbol_s);
94
95 struct arm_per_objfile
96 {
97 VEC(arm_mapping_symbol_s) **section_maps;
98 };
99
100 /* The list of available "set arm ..." and "show arm ..." commands. */
101 static struct cmd_list_element *setarmcmdlist = NULL;
102 static struct cmd_list_element *showarmcmdlist = NULL;
103
104 /* The type of floating-point to use. Keep this in sync with enum
105 arm_float_model, and the help string in _initialize_arm_tdep. */
106 static const char *const fp_model_strings[] =
107 {
108 "auto",
109 "softfpa",
110 "fpa",
111 "softvfp",
112 "vfp",
113 NULL
114 };
115
116 /* A variable that can be configured by the user. */
117 static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO;
118 static const char *current_fp_model = "auto";
119
120 /* The ABI to use. Keep this in sync with arm_abi_kind. */
121 static const char *const arm_abi_strings[] =
122 {
123 "auto",
124 "APCS",
125 "AAPCS",
126 NULL
127 };
128
129 /* A variable that can be configured by the user. */
130 static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO;
131 static const char *arm_abi_string = "auto";
132
133 /* The execution mode to assume. */
134 static const char *const arm_mode_strings[] =
135 {
136 "auto",
137 "arm",
138 "thumb",
139 NULL
140 };
141
142 static const char *arm_fallback_mode_string = "auto";
143 static const char *arm_force_mode_string = "auto";
144
145 /* Internal override of the execution mode. -1 means no override,
146 0 means override to ARM mode, 1 means override to Thumb mode.
147 The effect is the same as if arm_force_mode has been set by the
148 user (except the internal override has precedence over a user's
149 arm_force_mode override). */
150 static int arm_override_mode = -1;
151
152 /* Number of different reg name sets (options). */
153 static int num_disassembly_options;
154
155 /* The standard register names, and all the valid aliases for them. Note
156 that `fp', `sp' and `pc' are not added in this alias list, because they
157 have been added as builtin user registers in
158 std-regs.c:_initialize_frame_reg. */
159 static const struct
160 {
161 const char *name;
162 int regnum;
163 } arm_register_aliases[] = {
164 /* Basic register numbers. */
165 { "r0", 0 },
166 { "r1", 1 },
167 { "r2", 2 },
168 { "r3", 3 },
169 { "r4", 4 },
170 { "r5", 5 },
171 { "r6", 6 },
172 { "r7", 7 },
173 { "r8", 8 },
174 { "r9", 9 },
175 { "r10", 10 },
176 { "r11", 11 },
177 { "r12", 12 },
178 { "r13", 13 },
179 { "r14", 14 },
180 { "r15", 15 },
181 /* Synonyms (argument and variable registers). */
182 { "a1", 0 },
183 { "a2", 1 },
184 { "a3", 2 },
185 { "a4", 3 },
186 { "v1", 4 },
187 { "v2", 5 },
188 { "v3", 6 },
189 { "v4", 7 },
190 { "v5", 8 },
191 { "v6", 9 },
192 { "v7", 10 },
193 { "v8", 11 },
194 /* Other platform-specific names for r9. */
195 { "sb", 9 },
196 { "tr", 9 },
197 /* Special names. */
198 { "ip", 12 },
199 { "lr", 14 },
200 /* Names used by GCC (not listed in the ARM EABI). */
201 { "sl", 10 },
202 /* A special name from the older ATPCS. */
203 { "wr", 7 },
204 };
205
206 static const char *const arm_register_names[] =
207 {"r0", "r1", "r2", "r3", /* 0 1 2 3 */
208 "r4", "r5", "r6", "r7", /* 4 5 6 7 */
209 "r8", "r9", "r10", "r11", /* 8 9 10 11 */
210 "r12", "sp", "lr", "pc", /* 12 13 14 15 */
211 "f0", "f1", "f2", "f3", /* 16 17 18 19 */
212 "f4", "f5", "f6", "f7", /* 20 21 22 23 */
213 "fps", "cpsr" }; /* 24 25 */
214
215 /* Valid register name styles. */
216 static const char **valid_disassembly_styles;
217
218 /* Disassembly style to use. Default to "std" register names. */
219 static const char *disassembly_style;
220
221 /* This is used to keep the bfd arch_info in sync with the disassembly
222 style. */
223 static void set_disassembly_style_sfunc(char *, int,
224 struct cmd_list_element *);
225 static void set_disassembly_style (void);
226
227 static void convert_from_extended (const struct floatformat *, const void *,
228 void *, int);
229 static void convert_to_extended (const struct floatformat *, void *,
230 const void *, int);
231
232 static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch,
233 struct regcache *regcache,
234 int regnum, gdb_byte *buf);
235 static void arm_neon_quad_write (struct gdbarch *gdbarch,
236 struct regcache *regcache,
237 int regnum, const gdb_byte *buf);
238
239 static int thumb_insn_size (unsigned short inst1);
240
241 struct arm_prologue_cache
242 {
243 /* The stack pointer at the time this frame was created; i.e. the
244 caller's stack pointer when this function was called. It is used
245 to identify this frame. */
246 CORE_ADDR prev_sp;
247
248 /* The frame base for this frame is just prev_sp - frame size.
249 FRAMESIZE is the distance from the frame pointer to the
250 initial stack pointer. */
251
252 int framesize;
253
254 /* The register used to hold the frame pointer for this frame. */
255 int framereg;
256
257 /* Saved register offsets. */
258 struct trad_frame_saved_reg *saved_regs;
259 };
260
261 static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch,
262 CORE_ADDR prologue_start,
263 CORE_ADDR prologue_end,
264 struct arm_prologue_cache *cache);
265
 266 /* Architecture version for displaced stepping.  This affects the behaviour of
267 certain instructions, and really should not be hard-wired. */
268
269 #define DISPLACED_STEPPING_ARCH_VERSION 5
270
271 /* Addresses for calling Thumb functions have the bit 0 set.
272 Here are some macros to test, set, or clear bit 0 of addresses. */
273 #define IS_THUMB_ADDR(addr) ((addr) & 1)
274 #define MAKE_THUMB_ADDR(addr) ((addr) | 1)
275 #define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1)
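
/* Worked example (addresses chosen arbitrarily): a Thumb function whose first
   instruction is at 0x8000 is called through the address 0x8001, so

     IS_THUMB_ADDR (0x8001)     == 1
     MAKE_THUMB_ADDR (0x8000)   == 0x8001
     UNMAKE_THUMB_ADDR (0x8001) == 0x8000

   Bit 0 never carries real address information, since ARM and Thumb
   instructions are always at least 2-byte aligned.  */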
276
277 /* Set to true if the 32-bit mode is in use. */
278
279 int arm_apcs_32 = 1;
280
281 /* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */
282
283 int
284 arm_psr_thumb_bit (struct gdbarch *gdbarch)
285 {
286 if (gdbarch_tdep (gdbarch)->is_m)
287 return XPSR_T;
288 else
289 return CPSR_T;
290 }
291
292 /* Determine if FRAME is executing in Thumb mode. */
293
294 int
295 arm_frame_is_thumb (struct frame_info *frame)
296 {
297 CORE_ADDR cpsr;
298 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame));
299
300 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either
301 directly (from a signal frame or dummy frame) or by interpreting
302 the saved LR (from a prologue or DWARF frame). So consult it and
303 trust the unwinders. */
304 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
305
306 return (cpsr & t_bit) != 0;
307 }
308
309 /* Callback for VEC_lower_bound. */
310
311 static inline int
312 arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs,
313 const struct arm_mapping_symbol *rhs)
314 {
315 return lhs->value < rhs->value;
316 }
317
318 /* Search for the mapping symbol covering MEMADDR. If one is found,
319 return its type. Otherwise, return 0. If START is non-NULL,
320 set *START to the location of the mapping symbol. */
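
/* Illustrative example (offsets are hypothetical): the ARM ELF ABI defines
   mapping symbols named "$a", "$t" and "$d" that mark the start of ARM code,
   Thumb code and data respectively; the type returned here is the letter
   after the '$'.  Given a section whose sorted map contains
   { {0x00, 'a'}, {0x40, 't'}, {0x80, 'd'} }, looking up an address 0x44 past
   the section start returns 't' and sets *START to the address of the 0x40
   boundary.  */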
321
322 static char
323 arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start)
324 {
325 struct obj_section *sec;
326
327 /* If there are mapping symbols, consult them. */
328 sec = find_pc_section (memaddr);
329 if (sec != NULL)
330 {
331 struct arm_per_objfile *data;
332 VEC(arm_mapping_symbol_s) *map;
333 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec),
334 0 };
335 unsigned int idx;
336
337 data = objfile_data (sec->objfile, arm_objfile_data_key);
338 if (data != NULL)
339 {
340 map = data->section_maps[sec->the_bfd_section->index];
341 if (!VEC_empty (arm_mapping_symbol_s, map))
342 {
343 struct arm_mapping_symbol *map_sym;
344
345 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key,
346 arm_compare_mapping_symbols);
347
348 /* VEC_lower_bound finds the earliest ordered insertion
349 point. If the following symbol starts at this exact
350 address, we use that; otherwise, the preceding
351 mapping symbol covers this address. */
352 if (idx < VEC_length (arm_mapping_symbol_s, map))
353 {
354 map_sym = VEC_index (arm_mapping_symbol_s, map, idx);
355 if (map_sym->value == map_key.value)
356 {
357 if (start)
358 *start = map_sym->value + obj_section_addr (sec);
359 return map_sym->type;
360 }
361 }
362
363 if (idx > 0)
364 {
365 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1);
366 if (start)
367 *start = map_sym->value + obj_section_addr (sec);
368 return map_sym->type;
369 }
370 }
371 }
372 }
373
374 return 0;
375 }
376
377 /* Determine if the program counter specified in MEMADDR is in a Thumb
378 function. This function should be called for addresses unrelated to
379 any executing frame; otherwise, prefer arm_frame_is_thumb. */
380
381 int
382 arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr)
383 {
384 struct bound_minimal_symbol sym;
385 char type;
386 struct displaced_step_closure* dsc
387 = get_displaced_step_closure_by_addr(memaddr);
388
 389   /* If we are checking the mode of a displaced instruction in the copy area,
 390      the mode should be determined from the instruction at the original address. */
391 if (dsc)
392 {
393 if (debug_displaced)
394 fprintf_unfiltered (gdb_stdlog,
395 "displaced: check mode of %.8lx instead of %.8lx\n",
396 (unsigned long) dsc->insn_addr,
397 (unsigned long) memaddr);
398 memaddr = dsc->insn_addr;
399 }
400
401 /* If bit 0 of the address is set, assume this is a Thumb address. */
402 if (IS_THUMB_ADDR (memaddr))
403 return 1;
404
405 /* Respect internal mode override if active. */
406 if (arm_override_mode != -1)
407 return arm_override_mode;
408
409 /* If the user wants to override the symbol table, let him. */
410 if (strcmp (arm_force_mode_string, "arm") == 0)
411 return 0;
412 if (strcmp (arm_force_mode_string, "thumb") == 0)
413 return 1;
414
415 /* ARM v6-M and v7-M are always in Thumb mode. */
416 if (gdbarch_tdep (gdbarch)->is_m)
417 return 1;
418
419 /* If there are mapping symbols, consult them. */
420 type = arm_find_mapping_symbol (memaddr, NULL);
421 if (type)
422 return type == 't';
423
424 /* Thumb functions have a "special" bit set in minimal symbols. */
425 sym = lookup_minimal_symbol_by_pc (memaddr);
426 if (sym.minsym)
427 return (MSYMBOL_IS_SPECIAL (sym.minsym));
428
429 /* If the user wants to override the fallback mode, let them. */
430 if (strcmp (arm_fallback_mode_string, "arm") == 0)
431 return 0;
432 if (strcmp (arm_fallback_mode_string, "thumb") == 0)
433 return 1;
434
435 /* If we couldn't find any symbol, but we're talking to a running
436 target, then trust the current value of $cpsr. This lets
437 "display/i $pc" always show the correct mode (though if there is
438 a symbol table we will not reach here, so it still may not be
439 displayed in the mode it will be executed). */
440 if (target_has_registers)
441 return arm_frame_is_thumb (get_current_frame ());
442
443 /* Otherwise we're out of luck; we assume ARM. */
444 return 0;
445 }
446
447 /* Remove useless bits from addresses in a running program. */
448 static CORE_ADDR
449 arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val)
450 {
451 /* On M-profile devices, do not strip the low bit from EXC_RETURN
452 (the magic exception return address). */
453 if (gdbarch_tdep (gdbarch)->is_m
454 && (val & 0xfffffff0) == 0xfffffff0)
455 return val;
456
457 if (arm_apcs_32)
458 return UNMAKE_THUMB_ADDR (val);
459 else
460 return (val & 0x03fffffc);
461 }
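
/* For example (illustrative values): with arm_apcs_32 set, a Thumb return
   address of 0x8001 is cleaned to 0x8000, while on an M-profile target the
   magic EXC_RETURN value 0xfffffff1 is passed through unchanged so that the
   exception-return logic is not disturbed.  */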
462
463 /* Return 1 if PC is the start of a compiler helper function which
464 can be safely ignored during prologue skipping. IS_THUMB is true
465 if the function is known to be a Thumb function due to the way it
466 is being called. */
467 static int
468 skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb)
469 {
470 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
471 struct bound_minimal_symbol msym;
472
473 msym = lookup_minimal_symbol_by_pc (pc);
474 if (msym.minsym != NULL
475 && SYMBOL_VALUE_ADDRESS (msym.minsym) == pc
476 && SYMBOL_LINKAGE_NAME (msym.minsym) != NULL)
477 {
478 const char *name = SYMBOL_LINKAGE_NAME (msym.minsym);
479
480 /* The GNU linker's Thumb call stub to foo is named
481 __foo_from_thumb. */
482 if (strstr (name, "_from_thumb") != NULL)
483 name += 2;
484
485 /* On soft-float targets, __truncdfsf2 is called to convert promoted
486 arguments to their argument types in non-prototyped
487 functions. */
488 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0)
489 return 1;
490 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0)
491 return 1;
492
493 /* Internal functions related to thread-local storage. */
494 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0)
495 return 1;
496 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0)
497 return 1;
498 }
499 else
500 {
501 /* If we run against a stripped glibc, we may be unable to identify
502 special functions by name. Check for one important case,
503 __aeabi_read_tp, by comparing the *code* against the default
504 implementation (this is hand-written ARM assembler in glibc). */
505
506 if (!is_thumb
507 && read_memory_unsigned_integer (pc, 4, byte_order_for_code)
508 == 0xe3e00a0f /* mov r0, #0xffff0fff */
509 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code)
510 == 0xe240f01f) /* sub pc, r0, #31 */
511 return 1;
512 }
513
514 return 0;
515 }
516
517 /* Support routines for instruction parsing. */
518 #define submask(x) ((1L << ((x) + 1)) - 1)
519 #define bit(obj,st) (((obj) >> (st)) & 1)
520 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
521 #define sbits(obj,st,fn) \
522 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st))))
523 #define BranchDest(addr,instr) \
524 ((CORE_ADDR) (((unsigned long) (addr)) + 8 + (sbits (instr, 0, 23) << 2)))
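
/* Worked examples of the helpers above (values chosen arbitrarily):

     bits (0xe59f1004, 12, 15) == 0x1    four-bit field at bits 12-15
     sbits (0x00ffffff, 0, 23) == -1     24-bit field, sign-extended

   BranchDest adds 8 to the branch's own address because an ARM branch offset
   is relative to the current instruction plus two words of prefetch, then
   adds the sign-extended 24-bit offset shifted left by 2.  */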
525
 526 /* Extract the immediate from a movw/movt instruction of encoding T.  INSN1
 527    is the first 16 bits of the instruction, and INSN2 is the second
 528    16 bits. */
529 #define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \
530 ((bits ((insn1), 0, 3) << 12) \
531 | (bits ((insn1), 10, 10) << 11) \
532 | (bits ((insn2), 12, 14) << 8) \
533 | bits ((insn2), 0, 7))
534
 535 /* Extract the immediate from a movw/movt instruction of encoding A.  INSN
 536    is the 32-bit instruction. */
537 #define EXTRACT_MOVW_MOVT_IMM_A(insn) \
538 ((bits ((insn), 16, 19) << 12) \
539 | bits ((insn), 0, 11))
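
/* Worked example (bit fields chosen arbitrarily): with imm4 == 0x1 in
   INSN1[3:0], i == 1 in INSN1[10], imm3 == 0x2 in INSN2[14:12] and
   imm8 == 0x34 in INSN2[7:0], EXTRACT_MOVW_MOVT_IMM_T reassembles
   0x1000 | 0x800 | 0x200 | 0x34 == 0x1a34.  Similarly, for encoding A with
   imm4 == 0x1 in INSN[19:16] and imm12 == 0xa34 in INSN[11:0],
   EXTRACT_MOVW_MOVT_IMM_A also yields 0x1a34.  */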
540
541 /* Decode immediate value; implements ThumbExpandImmediate pseudo-op. */
542
543 static unsigned int
544 thumb_expand_immediate (unsigned int imm)
545 {
546 unsigned int count = imm >> 7;
547
548 if (count < 8)
549 switch (count / 2)
550 {
551 case 0:
552 return imm & 0xff;
553 case 1:
554 return (imm & 0xff) | ((imm & 0xff) << 16);
555 case 2:
556 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24);
557 case 3:
558 return (imm & 0xff) | ((imm & 0xff) << 8)
559 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24);
560 }
561
562 return (0x80 | (imm & 0x7f)) << (32 - count);
563 }
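
/* Worked examples of the function above (inputs chosen arbitrarily):
   thumb_expand_immediate (0x155) has count == 2, so case 1 replicates the low
   byte into both halfwords, giving 0x00550055.  For
   thumb_expand_immediate (0x4ff), count == 9 is not less than 8, so the
   result is (0x80 | 0x7f) << (32 - 9) == 0x7f800000, i.e. 0xff rotated right
   by 9 bits.  */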
564
565 /* Return 1 if the 16-bit Thumb instruction INST might change
566 control flow, 0 otherwise. */
567
568 static int
569 thumb_instruction_changes_pc (unsigned short inst)
570 {
571 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */
572 return 1;
573
574 if ((inst & 0xf000) == 0xd000) /* conditional branch */
575 return 1;
576
577 if ((inst & 0xf800) == 0xe000) /* unconditional branch */
578 return 1;
579
580 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */
581 return 1;
582
583 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */
584 return 1;
585
586 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */
587 return 1;
588
589 return 0;
590 }
591
592 /* Return 1 if the 32-bit Thumb instruction in INST1 and INST2
593 might change control flow, 0 otherwise. */
594
595 static int
596 thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2)
597 {
598 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
599 {
600 /* Branches and miscellaneous control instructions. */
601
602 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
603 {
604 /* B, BL, BLX. */
605 return 1;
606 }
607 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
608 {
609 /* SUBS PC, LR, #imm8. */
610 return 1;
611 }
612 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
613 {
614 /* Conditional branch. */
615 return 1;
616 }
617
618 return 0;
619 }
620
621 if ((inst1 & 0xfe50) == 0xe810)
622 {
623 /* Load multiple or RFE. */
624
625 if (bit (inst1, 7) && !bit (inst1, 8))
626 {
627 /* LDMIA or POP */
628 if (bit (inst2, 15))
629 return 1;
630 }
631 else if (!bit (inst1, 7) && bit (inst1, 8))
632 {
633 /* LDMDB */
634 if (bit (inst2, 15))
635 return 1;
636 }
637 else if (bit (inst1, 7) && bit (inst1, 8))
638 {
639 /* RFEIA */
640 return 1;
641 }
642 else if (!bit (inst1, 7) && !bit (inst1, 8))
643 {
644 /* RFEDB */
645 return 1;
646 }
647
648 return 0;
649 }
650
651 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
652 {
653 /* MOV PC or MOVS PC. */
654 return 1;
655 }
656
657 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
658 {
659 /* LDR PC. */
660 if (bits (inst1, 0, 3) == 15)
661 return 1;
662 if (bit (inst1, 7))
663 return 1;
664 if (bit (inst2, 11))
665 return 1;
666 if ((inst2 & 0x0fc0) == 0x0000)
667 return 1;
668
669 return 0;
670 }
671
672 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
673 {
674 /* TBB. */
675 return 1;
676 }
677
678 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
679 {
680 /* TBH. */
681 return 1;
682 }
683
684 return 0;
685 }
686
687 /* Analyze a Thumb prologue, looking for a recognizable stack frame
688 and frame pointer. Scan until we encounter a store that could
689 clobber the stack frame unexpectedly, or an unknown instruction.
690 Return the last address which is definitely safe to skip for an
691 initial breakpoint. */
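
/* A representative Thumb prologue that this scanner recognizes in full
   (illustrative only; the exact sequence is compiler- and option-dependent):

     push  {r4, r7, lr}     @ saved registers recorded via pv_area_store
     sub   sp, #16          @ SP tracked as pv_add_constant (sp, -16)
     add   r7, sp, #0       @ r7 becomes the frame pointer (THUMB_FP_REGNUM)

   Each recognized instruction updates the symbolic register values and the
   abstract stack area, so the frame register, frame size and saved-register
   offsets can be recorded in CACHE afterwards.  */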
692
693 static CORE_ADDR
694 thumb_analyze_prologue (struct gdbarch *gdbarch,
695 CORE_ADDR start, CORE_ADDR limit,
696 struct arm_prologue_cache *cache)
697 {
698 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
699 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
700 int i;
701 pv_t regs[16];
702 struct pv_area *stack;
703 struct cleanup *back_to;
704 CORE_ADDR offset;
705 CORE_ADDR unrecognized_pc = 0;
706
707 for (i = 0; i < 16; i++)
708 regs[i] = pv_register (i, 0);
709 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
710 back_to = make_cleanup_free_pv_area (stack);
711
712 while (start < limit)
713 {
714 unsigned short insn;
715
716 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code);
717
718 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */
719 {
720 int regno;
721 int mask;
722
723 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
724 break;
725
726 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says
727 whether to save LR (R14). */
728 mask = (insn & 0xff) | ((insn & 0x100) << 6);
729
730 /* Calculate offsets of saved R0-R7 and LR. */
731 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
732 if (mask & (1 << regno))
733 {
734 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
735 -4);
736 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
737 }
738 }
739 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR
740 sub sp, #simm */
741 {
742 offset = (insn & 0x7f) << 2; /* get scaled offset */
743 if (insn & 0x80) /* Check for SUB. */
744 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
745 -offset);
746 else
747 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM],
748 offset);
749 }
750 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */
751 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM],
752 (insn & 0xff) << 2);
753 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */
754 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
755 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)],
756 bits (insn, 6, 8));
757 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */
758 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
759 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)],
760 bits (insn, 0, 7));
761 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */
762 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM)
763 && pv_is_constant (regs[bits (insn, 3, 5)]))
764 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)],
765 regs[bits (insn, 6, 8)]);
766 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */
767 && pv_is_constant (regs[bits (insn, 3, 6)]))
768 {
769 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2);
770 int rm = bits (insn, 3, 6);
771 regs[rd] = pv_add (regs[rd], regs[rm]);
772 }
773 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */
774 {
775 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4);
776 int src_reg = (insn & 0x78) >> 3;
777 regs[dst_reg] = regs[src_reg];
778 }
779 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */
780 {
781 /* Handle stores to the stack. Normally pushes are used,
782 but with GCC -mtpcs-frame, there may be other stores
783 in the prologue to create the frame. */
784 int regno = (insn >> 8) & 0x7;
785 pv_t addr;
786
787 offset = (insn & 0xff) << 2;
788 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset);
789
790 if (pv_area_store_would_trash (stack, addr))
791 break;
792
793 pv_area_store (stack, addr, 4, regs[regno]);
794 }
795 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */
796 {
797 int rd = bits (insn, 0, 2);
798 int rn = bits (insn, 3, 5);
799 pv_t addr;
800
801 offset = bits (insn, 6, 10) << 2;
802 addr = pv_add_constant (regs[rn], offset);
803
804 if (pv_area_store_would_trash (stack, addr))
805 break;
806
807 pv_area_store (stack, addr, 4, regs[rd]);
808 }
809 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */
810 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */
811 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))
812 /* Ignore stores of argument registers to the stack. */
813 ;
814 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */
815 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM))
816 /* Ignore block loads from the stack, potentially copying
817 parameters from memory. */
818 ;
819 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */
820 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */
821 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)))
822 /* Similarly ignore single loads from the stack. */
823 ;
824 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */
825 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */
826 /* Skip register copies, i.e. saves to another register
827 instead of the stack. */
828 ;
829 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */
830 /* Recognize constant loads; even with small stacks these are necessary
831 on Thumb. */
832 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7));
833 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */
834 {
835 /* Constant pool loads, for the same reason. */
836 unsigned int constant;
837 CORE_ADDR loc;
838
839 loc = start + 4 + bits (insn, 0, 7) * 4;
840 constant = read_memory_unsigned_integer (loc, 4, byte_order);
841 regs[bits (insn, 8, 10)] = pv_constant (constant);
842 }
843 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instructions. */
844 {
845 unsigned short inst2;
846
847 inst2 = read_memory_unsigned_integer (start + 2, 2,
848 byte_order_for_code);
849
850 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800)
851 {
852 /* BL, BLX. Allow some special function calls when
853 skipping the prologue; GCC generates these before
854 storing arguments to the stack. */
855 CORE_ADDR nextpc;
856 int j1, j2, imm1, imm2;
857
858 imm1 = sbits (insn, 0, 10);
859 imm2 = bits (inst2, 0, 10);
860 j1 = bit (inst2, 13);
861 j2 = bit (inst2, 11);
862
863 offset = ((imm1 << 12) + (imm2 << 1));
864 offset ^= ((!j2) << 22) | ((!j1) << 23);
865
866 nextpc = start + 4 + offset;
867 /* For BLX make sure to clear the low bits. */
868 if (bit (inst2, 12) == 0)
869 nextpc = nextpc & 0xfffffffc;
870
871 if (!skip_prologue_function (gdbarch, nextpc,
872 bit (inst2, 12) != 0))
873 break;
874 }
875
876 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!},
877 { registers } */
878 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
879 {
880 pv_t addr = regs[bits (insn, 0, 3)];
881 int regno;
882
883 if (pv_area_store_would_trash (stack, addr))
884 break;
885
886 /* Calculate offsets of saved registers. */
887 for (regno = ARM_LR_REGNUM; regno >= 0; regno--)
888 if (inst2 & (1 << regno))
889 {
890 addr = pv_add_constant (addr, -4);
891 pv_area_store (stack, addr, 4, regs[regno]);
892 }
893
894 if (insn & 0x0020)
895 regs[bits (insn, 0, 3)] = addr;
896 }
897
898 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2,
899 [Rn, #+/-imm]{!} */
900 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
901 {
902 int regno1 = bits (inst2, 12, 15);
903 int regno2 = bits (inst2, 8, 11);
904 pv_t addr = regs[bits (insn, 0, 3)];
905
906 offset = inst2 & 0xff;
907 if (insn & 0x0080)
908 addr = pv_add_constant (addr, offset);
909 else
910 addr = pv_add_constant (addr, -offset);
911
912 if (pv_area_store_would_trash (stack, addr))
913 break;
914
915 pv_area_store (stack, addr, 4, regs[regno1]);
916 pv_area_store (stack, pv_add_constant (addr, 4),
917 4, regs[regno2]);
918
919 if (insn & 0x0020)
920 regs[bits (insn, 0, 3)] = addr;
921 }
922
923 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */
924 && (inst2 & 0x0c00) == 0x0c00
925 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
926 {
927 int regno = bits (inst2, 12, 15);
928 pv_t addr = regs[bits (insn, 0, 3)];
929
930 offset = inst2 & 0xff;
931 if (inst2 & 0x0200)
932 addr = pv_add_constant (addr, offset);
933 else
934 addr = pv_add_constant (addr, -offset);
935
936 if (pv_area_store_would_trash (stack, addr))
937 break;
938
939 pv_area_store (stack, addr, 4, regs[regno]);
940
941 if (inst2 & 0x0100)
942 regs[bits (insn, 0, 3)] = addr;
943 }
944
945 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */
946 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
947 {
948 int regno = bits (inst2, 12, 15);
949 pv_t addr;
950
951 offset = inst2 & 0xfff;
952 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset);
953
954 if (pv_area_store_would_trash (stack, addr))
955 break;
956
957 pv_area_store (stack, addr, 4, regs[regno]);
958 }
959
960 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w Rt,[Rn,#imm] */
961 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
962 /* Ignore stores of argument registers to the stack. */
963 ;
964
965 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */
966 && (inst2 & 0x0d00) == 0x0c00
967 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
968 /* Ignore stores of argument registers to the stack. */
969 ;
970
971 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!],
972 { registers } */
973 && (inst2 & 0x8000) == 0x0000
974 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
975 /* Ignore block loads from the stack, potentially copying
976 parameters from memory. */
977 ;
978
979 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2,
980 [Rn, #+/-imm] */
981 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
982 /* Similarly ignore dual loads from the stack. */
983 ;
984
985 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */
986 && (inst2 & 0x0d00) == 0x0c00
987 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
988 /* Similarly ignore single loads from the stack. */
989 ;
990
991 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */
992 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM))
993 /* Similarly ignore single loads from the stack. */
994 ;
995
996 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */
997 && (inst2 & 0x8000) == 0x0000)
998 {
999 unsigned int imm = ((bits (insn, 10, 10) << 11)
1000 | (bits (inst2, 12, 14) << 8)
1001 | bits (inst2, 0, 7));
1002
1003 regs[bits (inst2, 8, 11)]
1004 = pv_add_constant (regs[bits (insn, 0, 3)],
1005 thumb_expand_immediate (imm));
1006 }
1007
1008 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */
1009 && (inst2 & 0x8000) == 0x0000)
1010 {
1011 unsigned int imm = ((bits (insn, 10, 10) << 11)
1012 | (bits (inst2, 12, 14) << 8)
1013 | bits (inst2, 0, 7));
1014
1015 regs[bits (inst2, 8, 11)]
1016 = pv_add_constant (regs[bits (insn, 0, 3)], imm);
1017 }
1018
1019 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */
1020 && (inst2 & 0x8000) == 0x0000)
1021 {
1022 unsigned int imm = ((bits (insn, 10, 10) << 11)
1023 | (bits (inst2, 12, 14) << 8)
1024 | bits (inst2, 0, 7));
1025
1026 regs[bits (inst2, 8, 11)]
1027 = pv_add_constant (regs[bits (insn, 0, 3)],
1028 - (CORE_ADDR) thumb_expand_immediate (imm));
1029 }
1030
1031 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */
1032 && (inst2 & 0x8000) == 0x0000)
1033 {
1034 unsigned int imm = ((bits (insn, 10, 10) << 11)
1035 | (bits (inst2, 12, 14) << 8)
1036 | bits (inst2, 0, 7));
1037
1038 regs[bits (inst2, 8, 11)]
1039 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm);
1040 }
1041
1042 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */
1043 {
1044 unsigned int imm = ((bits (insn, 10, 10) << 11)
1045 | (bits (inst2, 12, 14) << 8)
1046 | bits (inst2, 0, 7));
1047
1048 regs[bits (inst2, 8, 11)]
1049 = pv_constant (thumb_expand_immediate (imm));
1050 }
1051
1052 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */
1053 {
1054 unsigned int imm
1055 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2);
1056
1057 regs[bits (inst2, 8, 11)] = pv_constant (imm);
1058 }
1059
1060 else if (insn == 0xea5f /* mov.w Rd,Rm */
1061 && (inst2 & 0xf0f0) == 0)
1062 {
1063 int dst_reg = (inst2 & 0x0f00) >> 8;
1064 int src_reg = inst2 & 0xf;
1065 regs[dst_reg] = regs[src_reg];
1066 }
1067
1068 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */
1069 {
1070 /* Constant pool loads. */
1071 unsigned int constant;
1072 CORE_ADDR loc;
1073
1074 offset = bits (insn, 0, 11);
1075 if (insn & 0x0080)
1076 loc = start + 4 + offset;
1077 else
1078 loc = start + 4 - offset;
1079
1080 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1081 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1082 }
1083
1084 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */
1085 {
1086 /* Constant pool loads. */
1087 unsigned int constant;
1088 CORE_ADDR loc;
1089
1090 offset = bits (insn, 0, 7) << 2;
1091 if (insn & 0x0080)
1092 loc = start + 4 + offset;
1093 else
1094 loc = start + 4 - offset;
1095
1096 constant = read_memory_unsigned_integer (loc, 4, byte_order);
1097 regs[bits (inst2, 12, 15)] = pv_constant (constant);
1098
1099 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order);
1100 regs[bits (inst2, 8, 11)] = pv_constant (constant);
1101 }
1102
1103 else if (thumb2_instruction_changes_pc (insn, inst2))
1104 {
1105 /* Don't scan past anything that might change control flow. */
1106 break;
1107 }
1108 else
1109 {
1110 /* The optimizer might shove anything into the prologue,
1111 so we just skip what we don't recognize. */
1112 unrecognized_pc = start;
1113 }
1114
1115 start += 2;
1116 }
1117 else if (thumb_instruction_changes_pc (insn))
1118 {
1119 /* Don't scan past anything that might change control flow. */
1120 break;
1121 }
1122 else
1123 {
1124 /* The optimizer might shove anything into the prologue,
1125 so we just skip what we don't recognize. */
1126 unrecognized_pc = start;
1127 }
1128
1129 start += 2;
1130 }
1131
1132 if (arm_debug)
1133 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1134 paddress (gdbarch, start));
1135
1136 if (unrecognized_pc == 0)
1137 unrecognized_pc = start;
1138
1139 if (cache == NULL)
1140 {
1141 do_cleanups (back_to);
1142 return unrecognized_pc;
1143 }
1144
1145 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1146 {
1147 /* Frame pointer is fp. Frame size is constant. */
1148 cache->framereg = ARM_FP_REGNUM;
1149 cache->framesize = -regs[ARM_FP_REGNUM].k;
1150 }
1151 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM))
1152 {
1153 /* Frame pointer is r7. Frame size is constant. */
1154 cache->framereg = THUMB_FP_REGNUM;
1155 cache->framesize = -regs[THUMB_FP_REGNUM].k;
1156 }
1157 else
1158 {
1159 /* Try the stack pointer... this is a bit desperate. */
1160 cache->framereg = ARM_SP_REGNUM;
1161 cache->framesize = -regs[ARM_SP_REGNUM].k;
1162 }
1163
1164 for (i = 0; i < 16; i++)
1165 if (pv_area_find_reg (stack, gdbarch, i, &offset))
1166 cache->saved_regs[i].addr = offset;
1167
1168 do_cleanups (back_to);
1169 return unrecognized_pc;
1170 }
1171
1172
 1173 /* Try to analyze the instructions starting from PC, which load the
 1174    symbol __stack_chk_guard.  If they do, return the loaded address (the
 1175    address of __stack_chk_guard), set the destination register number in
 1176    *DESTREG, and set the size in bytes of these instructions in *OFFSET.
 1177    Return 0 if the instructions are not recognized. */
1178
1179 static CORE_ADDR
1180 arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
1181 unsigned int *destreg, int *offset)
1182 {
1183 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1184 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1185 unsigned int low, high, address;
1186
1187 address = 0;
1188 if (is_thumb)
1189 {
1190 unsigned short insn1
1191 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
1192
1193 if ((insn1 & 0xf800) == 0x4800) /* ldr Rd, #immed */
1194 {
1195 *destreg = bits (insn1, 8, 10);
1196 *offset = 2;
1197 address = bits (insn1, 0, 7);
1198 }
1199 else if ((insn1 & 0xfbf0) == 0xf240) /* movw Rd, #const */
1200 {
1201 unsigned short insn2
1202 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
1203
1204 low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1205
1206 insn1
1207 = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
1208 insn2
1209 = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);
1210
1211 /* movt Rd, #const */
1212 if ((insn1 & 0xfbc0) == 0xf2c0)
1213 {
1214 high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
1215 *destreg = bits (insn2, 8, 11);
1216 *offset = 8;
1217 address = (high << 16 | low);
1218 }
1219 }
1220 }
1221 else
1222 {
1223 unsigned int insn
1224 = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
1225
1226 if ((insn & 0x0e5f0000) == 0x041f0000) /* ldr Rd, #immed */
1227 {
1228 address = bits (insn, 0, 11);
1229 *destreg = bits (insn, 12, 15);
1230 *offset = 4;
1231 }
1232 else if ((insn & 0x0ff00000) == 0x03000000) /* movw Rd, #const */
1233 {
1234 low = EXTRACT_MOVW_MOVT_IMM_A (insn);
1235
1236 insn
1237 = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);
1238
1239 if ((insn & 0x0ff00000) == 0x03400000) /* movt Rd, #const */
1240 {
1241 high = EXTRACT_MOVW_MOVT_IMM_A (insn);
1242 *destreg = bits (insn, 12, 15);
1243 *offset = 8;
1244 address = (high << 16 | low);
1245 }
1246 }
1247 }
1248
1249 return address;
1250 }
1251
 1252 /* Try to skip a sequence of instructions used for the stack protector.  If
 1253    PC points to the first instruction of this sequence, return the address
 1254    of the first instruction after it; otherwise, return the original PC.
 1255 
 1256    On ARM, this sequence consists of three main steps:
 1257      Step 1: load the address of the symbol __stack_chk_guard,
 1258      Step 2: load the guard value from that address,
 1259      Step 3: store it somewhere else (the stack canary slot).
 1260 
 1261    The instructions for step 2 and step 3 are usually the same across ARM
 1262    architectures: step 2 is a single 'ldr Rx, [Rn, #0]', and step 3 is a
 1263    single 'str Rx, [r7, #immd]'.  The instructions in step 1, however,
 1264    differ between architectures.  On ARMv7 they are:
 1265 
 1266      movw Rn, #:lower16:__stack_chk_guard
 1267      movt Rn, #:upper16:__stack_chk_guard
 1268 
 1269    On ARMv5t it is:
 1270 
 1271      ldr Rn, .Label
 1272      ....
 1273      .Label:
 1274      .word __stack_chk_guard
 1275 
 1276    Since ldr/str are very common instructions, we cannot use them alone
 1277    as the 'fingerprint' of the stack protector sequence.  Instead we use
 1278    the sequence {movw/movt, ldr}/ldr/str together with the symbol
 1279    __stack_chk_guard (if not stripped) as the fingerprint of a stack
 1280    protector code sequence. */
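
/* A representative ARMv7/Thumb-2 instance of the sequence described above
   (purely illustrative; register numbers and the canary slot offset are
   hypothetical):

     movw  r3, #:lower16:__stack_chk_guard   @ step 1, 8 bytes in total
     movt  r3, #:upper16:__stack_chk_guard
     ldr   r3, [r3, #0]                      @ step 2
     str   r3, [r7, #12]                     @ step 3, canary slot

   arm_analyze_load_stack_chk_guard above recognizes step 1; the matching of
   steps 2 and 3 then lets us skip past the whole sequence, i.e. OFFSET plus
   4 bytes on Thumb-2 or OFFSET plus 8 bytes on ARM.  */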
1281
1282 static CORE_ADDR
1283 arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
1284 {
1285 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1286 unsigned int basereg;
1287 struct bound_minimal_symbol stack_chk_guard;
1288 int offset;
1289 int is_thumb = arm_pc_is_thumb (gdbarch, pc);
1290 CORE_ADDR addr;
1291
1292 /* Try to parse the instructions in Step 1. */
1293 addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
1294 &basereg, &offset);
1295 if (!addr)
1296 return pc;
1297
1298 stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
 1299   /* If the symbol's name doesn't start with '__stack_chk_guard', this
 1300      instruction sequence is not for the stack protector.  If the symbol
 1301      has been stripped, we conservatively treat the sequence as a stack protector. */
1302 if (stack_chk_guard.minsym
1303 && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard.minsym),
1304 "__stack_chk_guard",
1305 strlen ("__stack_chk_guard")) != 0)
1306 return pc;
1307
1308 if (is_thumb)
1309 {
1310 unsigned int destreg;
1311 unsigned short insn
1312 = read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);
1313
1314 /* Step 2: ldr Rd, [Rn, #immed], encoding T1. */
1315 if ((insn & 0xf800) != 0x6800)
1316 return pc;
1317 if (bits (insn, 3, 5) != basereg)
1318 return pc;
1319 destreg = bits (insn, 0, 2);
1320
1321 insn = read_memory_unsigned_integer (pc + offset + 2, 2,
1322 byte_order_for_code);
1323 /* Step 3: str Rd, [Rn, #immed], encoding T1. */
1324 if ((insn & 0xf800) != 0x6000)
1325 return pc;
1326 if (destreg != bits (insn, 0, 2))
1327 return pc;
1328 }
1329 else
1330 {
1331 unsigned int destreg;
1332 unsigned int insn
1333 = read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);
1334
1335 /* Step 2: ldr Rd, [Rn, #immed], encoding A1. */
1336 if ((insn & 0x0e500000) != 0x04100000)
1337 return pc;
1338 if (bits (insn, 16, 19) != basereg)
1339 return pc;
1340 destreg = bits (insn, 12, 15);
1341 /* Step 3: str Rd, [Rn, #immed], encoding A1. */
1342 insn = read_memory_unsigned_integer (pc + offset + 4,
1343 4, byte_order_for_code);
1344 if ((insn & 0x0e500000) != 0x04000000)
1345 return pc;
1346 if (bits (insn, 12, 15) != destreg)
1347 return pc;
1348 }
 1349   /* The two ldr/str instructions together are 4 bytes on Thumb-2 and
 1350      8 bytes on ARM. */
1351 if (is_thumb)
1352 return pc + offset + 4;
1353 else
1354 return pc + offset + 8;
1355 }
1356
1357 /* Advance the PC across any function entry prologue instructions to
1358 reach some "real" code.
1359
1360 The APCS (ARM Procedure Call Standard) defines the following
1361 prologue:
1362
1363 mov ip, sp
1364 [stmfd sp!, {a1,a2,a3,a4}]
1365 stmfd sp!, {...,fp,ip,lr,pc}
1366 [stfe f7, [sp, #-12]!]
1367 [stfe f6, [sp, #-12]!]
1368 [stfe f5, [sp, #-12]!]
1369 [stfe f4, [sp, #-12]!]
1370 sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn. */
1371
1372 static CORE_ADDR
1373 arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1374 {
1375 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1376 unsigned long inst;
1377 CORE_ADDR skip_pc;
1378 CORE_ADDR func_addr, limit_pc;
1379
1380 /* See if we can determine the end of the prologue via the symbol table.
1381 If so, then return either PC, or the PC after the prologue, whichever
1382 is greater. */
1383 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1384 {
1385 CORE_ADDR post_prologue_pc
1386 = skip_prologue_using_sal (gdbarch, func_addr);
1387 struct symtab *s = find_pc_symtab (func_addr);
1388
1389 if (post_prologue_pc)
1390 post_prologue_pc
1391 = arm_skip_stack_protector (post_prologue_pc, gdbarch);
1392
1393
1394 /* GCC always emits a line note before the prologue and another
1395 one after, even if the two are at the same address or on the
1396 same line. Take advantage of this so that we do not need to
1397 know every instruction that might appear in the prologue. We
1398 will have producer information for most binaries; if it is
 1399    missing (e.g. for -gstabs), assume the GNU tools. */
1400 if (post_prologue_pc
1401 && (s == NULL
1402 || s->producer == NULL
1403 || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0
1404 || strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
1405 return post_prologue_pc;
1406
1407 if (post_prologue_pc != 0)
1408 {
1409 CORE_ADDR analyzed_limit;
1410
1411 /* For non-GCC compilers, make sure the entire line is an
1412 acceptable prologue; GDB will round this function's
1413 return value up to the end of the following line so we
1414 can not skip just part of a line (and we do not want to).
1415
1416 RealView does not treat the prologue specially, but does
1417 associate prologue code with the opening brace; so this
1418 lets us skip the first line if we think it is the opening
1419 brace. */
1420 if (arm_pc_is_thumb (gdbarch, func_addr))
1421 analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
1422 post_prologue_pc, NULL);
1423 else
1424 analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
1425 post_prologue_pc, NULL);
1426
1427 if (analyzed_limit != post_prologue_pc)
1428 return func_addr;
1429
1430 return post_prologue_pc;
1431 }
1432 }
1433
1434 /* Can't determine prologue from the symbol table, need to examine
1435 instructions. */
1436
1437 /* Find an upper limit on the function prologue using the debug
1438 information. If the debug information could not be used to provide
 1439    that bound, then use an arbitrarily large number as the upper bound. */
1440 /* Like arm_scan_prologue, stop no later than pc + 64. */
1441 limit_pc = skip_prologue_using_sal (gdbarch, pc);
1442 if (limit_pc == 0)
1443 limit_pc = pc + 64; /* Magic. */
1444
1445
1446 /* Check if this is Thumb code. */
1447 if (arm_pc_is_thumb (gdbarch, pc))
1448 return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);
1449
1450 for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
1451 {
1452 inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);
1453
1454 /* "mov ip, sp" is no longer a required part of the prologue. */
1455 if (inst == 0xe1a0c00d) /* mov ip, sp */
1456 continue;
1457
1458 if ((inst & 0xfffff000) == 0xe28dc000) /* add ip, sp #n */
1459 continue;
1460
1461 if ((inst & 0xfffff000) == 0xe24dc000) /* sub ip, sp #n */
1462 continue;
1463
1464 /* Some prologues begin with "str lr, [sp, #-4]!". */
1465 if (inst == 0xe52de004) /* str lr, [sp, #-4]! */
1466 continue;
1467
1468 if ((inst & 0xfffffff0) == 0xe92d0000) /* stmfd sp!,{a1,a2,a3,a4} */
1469 continue;
1470
1471 if ((inst & 0xfffff800) == 0xe92dd800) /* stmfd sp!,{fp,ip,lr,pc} */
1472 continue;
1473
1474 /* Any insns after this point may float into the code, if it makes
1475 for better instruction scheduling, so we skip them only if we
1476 find them, but still consider the function to be frame-ful. */
1477
1478 /* We may have either one sfmfd instruction here, or several stfe
1479 insns, depending on the version of floating point code we
1480 support. */
1481 if ((inst & 0xffbf0fff) == 0xec2d0200) /* sfmfd fn, <cnt>, [sp]! */
1482 continue;
1483
1484 if ((inst & 0xffff8fff) == 0xed6d0103) /* stfe fn, [sp, #-12]! */
1485 continue;
1486
1487 if ((inst & 0xfffff000) == 0xe24cb000) /* sub fp, ip, #nn */
1488 continue;
1489
1490 if ((inst & 0xfffff000) == 0xe24dd000) /* sub sp, sp, #nn */
1491 continue;
1492
1493 if ((inst & 0xffffc000) == 0xe54b0000 /* strb r(0123),[r11,#-nn] */
1494 || (inst & 0xffffc0f0) == 0xe14b00b0 /* strh r(0123),[r11,#-nn] */
1495 || (inst & 0xffffc000) == 0xe50b0000) /* str r(0123),[r11,#-nn] */
1496 continue;
1497
1498 if ((inst & 0xffffc000) == 0xe5cd0000 /* strb r(0123),[sp,#nn] */
1499 || (inst & 0xffffc0f0) == 0xe1cd00b0 /* strh r(0123),[sp,#nn] */
1500 || (inst & 0xffffc000) == 0xe58d0000) /* str r(0123),[sp,#nn] */
1501 continue;
1502
1503 /* Un-recognized instruction; stop scanning. */
1504 break;
1505 }
1506
1507 return skip_pc; /* End of prologue. */
1508 }
1509
1510 /* *INDENT-OFF* */
1511 /* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
1512 This function decodes a Thumb function prologue to determine:
1513 1) the size of the stack frame
1514 2) which registers are saved on it
1515 3) the offsets of saved regs
1516 4) the offset from the stack pointer to the frame pointer
1517
1518 A typical Thumb function prologue would create this stack frame
1519 (offsets relative to FP)
1520 old SP -> 24 stack parameters
1521 20 LR
1522 16 R7
1523 R7 -> 0 local variables (16 bytes)
1524 SP -> -12 additional stack space (12 bytes)
1525 The frame size would thus be 36 bytes, and the frame offset would be
1526 12 bytes. The frame register is R7.
1527
 1528    The comments for thumb_analyze_prologue() describe the algorithm we use
 1529    to detect the end of the prologue. */
1530 /* *INDENT-ON* */
1531
1532 static void
1533 thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
1534 CORE_ADDR block_addr, struct arm_prologue_cache *cache)
1535 {
1536 CORE_ADDR prologue_start;
1537 CORE_ADDR prologue_end;
1538
1539 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1540 &prologue_end))
1541 {
1542 /* See comment in arm_scan_prologue for an explanation of
 1543      this heuristic. */
1544 if (prologue_end > prologue_start + 64)
1545 {
1546 prologue_end = prologue_start + 64;
1547 }
1548 }
1549 else
1550 /* We're in the boondocks: we have no idea where the start of the
1551 function is. */
1552 return;
1553
1554 prologue_end = min (prologue_end, prev_pc);
1555
1556 thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
1557 }
1558
1559 /* Return 1 if THIS_INSTR might change control flow, 0 otherwise. */
1560
1561 static int
1562 arm_instruction_changes_pc (uint32_t this_instr)
1563 {
1564 if (bits (this_instr, 28, 31) == INST_NV)
1565 /* Unconditional instructions. */
1566 switch (bits (this_instr, 24, 27))
1567 {
1568 case 0xa:
1569 case 0xb:
1570 /* Branch with Link and change to Thumb. */
1571 return 1;
1572 case 0xc:
1573 case 0xd:
1574 case 0xe:
1575 /* Coprocessor register transfer. */
1576 if (bits (this_instr, 12, 15) == 15)
1577 error (_("Invalid update to pc in instruction"));
1578 return 0;
1579 default:
1580 return 0;
1581 }
1582 else
1583 switch (bits (this_instr, 25, 27))
1584 {
1585 case 0x0:
1586 if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
1587 {
1588 /* Multiplies and extra load/stores. */
1589 if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
 1590      /* Neither multiplies nor extra load/stores are allowed
1591 to modify PC. */
1592 return 0;
1593
1594 /* Otherwise, miscellaneous instructions. */
1595
1596 /* BX <reg>, BXJ <reg>, BLX <reg> */
1597 if (bits (this_instr, 4, 27) == 0x12fff1
1598 || bits (this_instr, 4, 27) == 0x12fff2
1599 || bits (this_instr, 4, 27) == 0x12fff3)
1600 return 1;
1601
1602 /* Other miscellaneous instructions are unpredictable if they
1603 modify PC. */
1604 return 0;
1605 }
1606 /* Data processing instruction. Fall through. */
1607
1608 case 0x1:
1609 if (bits (this_instr, 12, 15) == 15)
1610 return 1;
1611 else
1612 return 0;
1613
1614 case 0x2:
1615 case 0x3:
1616 /* Media instructions and architecturally undefined instructions. */
1617 if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
1618 return 0;
1619
1620 /* Stores. */
1621 if (bit (this_instr, 20) == 0)
1622 return 0;
1623
1624 /* Loads. */
1625 if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
1626 return 1;
1627 else
1628 return 0;
1629
1630 case 0x4:
1631 /* Load/store multiple. */
1632 if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
1633 return 1;
1634 else
1635 return 0;
1636
1637 case 0x5:
1638 /* Branch and branch with link. */
1639 return 1;
1640
1641 case 0x6:
1642 case 0x7:
1643 /* Coprocessor transfers or SWIs can not affect PC. */
1644 return 0;
1645
1646 default:
1647 internal_error (__FILE__, __LINE__, _("bad value in switch"));
1648 }
1649 }
1650
1651 /* Analyze an ARM mode prologue starting at PROLOGUE_START and
1652 continuing no further than PROLOGUE_END. If CACHE is non-NULL,
1653 fill it in. Return the first address not recognized as a prologue
1654 instruction.
1655
1656 We recognize all the instructions typically found in ARM prologues,
1657 plus harmless instructions which can be skipped (either for analysis
1658 purposes, or a more restrictive set that can be skipped when finding
1659 the end of the prologue). */
1660
1661 static CORE_ADDR
1662 arm_analyze_prologue (struct gdbarch *gdbarch,
1663 CORE_ADDR prologue_start, CORE_ADDR prologue_end,
1664 struct arm_prologue_cache *cache)
1665 {
1666 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1667 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
1668 int regno;
1669 CORE_ADDR offset, current_pc;
1670 pv_t regs[ARM_FPS_REGNUM];
1671 struct pv_area *stack;
1672 struct cleanup *back_to;
1673 int framereg, framesize;
1674 CORE_ADDR unrecognized_pc = 0;
1675
1676 /* Search the prologue looking for instructions that set up the
1677 frame pointer, adjust the stack pointer, and save registers.
1678
1679 Be careful, however, and if it doesn't look like a prologue,
1680 don't try to scan it. If, for instance, a frameless function
1681 begins with stmfd sp!, then we will tell ourselves there is
1682 a frame, which will confuse stack traceback, as well as "finish"
1683 and other operations that rely on a knowledge of the stack
1684 traceback. */
1685
1686 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1687 regs[regno] = pv_register (regno, 0);
1688 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
1689 back_to = make_cleanup_free_pv_area (stack);
1690
1691 for (current_pc = prologue_start;
1692 current_pc < prologue_end;
1693 current_pc += 4)
1694 {
1695 unsigned int insn
1696 = read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);
1697
1698 if (insn == 0xe1a0c00d) /* mov ip, sp */
1699 {
1700 regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
1701 continue;
1702 }
1703 else if ((insn & 0xfff00000) == 0xe2800000 /* add Rd, Rn, #n */
1704 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1705 {
1706 unsigned imm = insn & 0xff; /* immediate value */
1707 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1708 int rd = bits (insn, 12, 15);
1709 imm = (imm >> rot) | (imm << (32 - rot));
1710 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
1711 continue;
1712 }
1713 else if ((insn & 0xfff00000) == 0xe2400000 /* sub Rd, Rn, #n */
1714 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1715 {
1716 unsigned imm = insn & 0xff; /* immediate value */
1717 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1718 int rd = bits (insn, 12, 15);
1719 imm = (imm >> rot) | (imm << (32 - rot));
1720 regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
1721 continue;
1722 }
1723 else if ((insn & 0xffff0fff) == 0xe52d0004) /* str Rd,
1724 [sp, #-4]! */
1725 {
1726 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1727 break;
1728 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1729 pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
1730 regs[bits (insn, 12, 15)]);
1731 continue;
1732 }
1733 else if ((insn & 0xffff0000) == 0xe92d0000)
1734 /* stmfd sp!, {..., fp, ip, lr, pc}
1735 or
1736 stmfd sp!, {a1, a2, a3, a4} */
1737 {
1738 int mask = insn & 0xffff;
1739
1740 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1741 break;
1742
1743 /* Calculate offsets of saved registers. */
1744 for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
1745 if (mask & (1 << regno))
1746 {
1747 regs[ARM_SP_REGNUM]
1748 = pv_add_constant (regs[ARM_SP_REGNUM], -4);
1749 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
1750 }
1751 }
1752 else if ((insn & 0xffff0000) == 0xe54b0000 /* strb rx,[r11,#-n] */
1753 || (insn & 0xffff00f0) == 0xe14b00b0 /* strh rx,[r11,#-n] */
1754 || (insn & 0xffffc000) == 0xe50b0000) /* str rx,[r11,#-n] */
1755 {
1756 /* No need to add this to saved_regs -- it's just an arg reg. */
1757 continue;
1758 }
1759 else if ((insn & 0xffff0000) == 0xe5cd0000 /* strb rx,[sp,#n] */
1760 || (insn & 0xffff00f0) == 0xe1cd00b0 /* strh rx,[sp,#n] */
1761 || (insn & 0xffffc000) == 0xe58d0000) /* str rx,[sp,#n] */
1762 {
1763 /* No need to add this to saved_regs -- it's just an arg reg. */
1764 continue;
1765 }
1766 else if ((insn & 0xfff00000) == 0xe8800000 /* stm Rn,
1767 { registers } */
1768 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1769 {
1770 /* No need to add this to saved_regs -- it's just arg regs. */
1771 continue;
1772 }
1773 else if ((insn & 0xfffff000) == 0xe24cb000) /* sub fp, ip #n */
1774 {
1775 unsigned imm = insn & 0xff; /* immediate value */
1776 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1777 imm = (imm >> rot) | (imm << (32 - rot));
1778 regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
1779 }
1780 else if ((insn & 0xfffff000) == 0xe24dd000) /* sub sp, sp #n */
1781 {
1782 unsigned imm = insn & 0xff; /* immediate value */
1783 unsigned rot = (insn & 0xf00) >> 7; /* rotate amount */
1784 imm = (imm >> rot) | (imm << (32 - rot));
1785 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
1786 }
1787 else if ((insn & 0xffff7fff) == 0xed6d0103 /* stfe f?,
1788 [sp, -#c]! */
1789 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1790 {
1791 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1792 break;
1793
1794 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1795 regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
1796 pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
1797 }
1798 else if ((insn & 0xffbf0fff) == 0xec2d0200 /* sfmfd f0, 4,
1799 [sp!] */
1800 && gdbarch_tdep (gdbarch)->have_fpa_registers)
1801 {
1802 int n_saved_fp_regs;
1803 unsigned int fp_start_reg, fp_bound_reg;
1804
1805 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
1806 break;
1807
1808 if ((insn & 0x800) == 0x800) /* N0 is set */
1809 {
1810 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1811 n_saved_fp_regs = 3;
1812 else
1813 n_saved_fp_regs = 1;
1814 }
1815 else
1816 {
1817 if ((insn & 0x40000) == 0x40000) /* N1 is set */
1818 n_saved_fp_regs = 2;
1819 else
1820 n_saved_fp_regs = 4;
1821 }
1822
1823 fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
1824 fp_bound_reg = fp_start_reg + n_saved_fp_regs;
1825 for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
1826 {
1827 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
1828 pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
1829 regs[fp_start_reg]);
1830 }
1831 }
1832 else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
1833 {
1834 /* Allow some special function calls when skipping the
1835 prologue; GCC generates these before storing arguments to
1836 the stack. */
1837 CORE_ADDR dest = BranchDest (current_pc, insn);
1838
1839 if (skip_prologue_function (gdbarch, dest, 0))
1840 continue;
1841 else
1842 break;
1843 }
1844 else if ((insn & 0xf0000000) != 0xe0000000)
1845 break; /* Condition not true, exit early. */
1846 else if (arm_instruction_changes_pc (insn))
1847 /* Don't scan past anything that might change control flow. */
1848 break;
1849 else if ((insn & 0xfe500000) == 0xe8100000 /* ldm */
1850 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1851 /* Ignore block loads from the stack, potentially copying
1852 parameters from memory. */
1853 continue;
1854 else if ((insn & 0xfc500000) == 0xe4100000
1855 && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
1856 /* Similarly ignore single loads from the stack. */
1857 continue;
1858 else if ((insn & 0xffff0ff0) == 0xe1a00000)
1859 /* MOV Rd, Rm. Skip register copies, i.e. saves to another
1860 register instead of the stack. */
1861 continue;
1862 else
1863 {
1864 /* The optimizer might shove anything into the prologue,
1865 so we just skip what we don't recognize. */
1866 unrecognized_pc = current_pc;
1867 continue;
1868 }
1869 }
1870
1871 if (unrecognized_pc == 0)
1872 unrecognized_pc = current_pc;
1873
1874 /* The frame size is just the distance from the frame register
1875 to the original stack pointer. */
1876 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
1877 {
1878 /* Frame pointer is fp. */
1879 framereg = ARM_FP_REGNUM;
1880 framesize = -regs[ARM_FP_REGNUM].k;
1881 }
1882 else
1883 {
1884 /* Try the stack pointer... this is a bit desperate. */
1885 framereg = ARM_SP_REGNUM;
1886 framesize = -regs[ARM_SP_REGNUM].k;
1887 }
1888
1889 if (cache)
1890 {
1891 cache->framereg = framereg;
1892 cache->framesize = framesize;
1893
1894 for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
1895 if (pv_area_find_reg (stack, gdbarch, regno, &offset))
1896 cache->saved_regs[regno].addr = offset;
1897 }
1898
1899 if (arm_debug)
1900 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
1901 paddress (gdbarch, unrecognized_pc));
1902
1903 do_cleanups (back_to);
1904 return unrecognized_pc;
1905 }
1906
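/* The add/sub-immediate cases above all decode the ARM "modified
   immediate" operand: an 8-bit value rotated right by twice the 4-bit
   rotate field.  A minimal standalone sketch of that decoding (not GDB
   code; the helper name is invented for illustration):  */
#if 0
static unsigned int
example_decode_arm_immediate (unsigned int insn)
{
  unsigned int imm = insn & 0xff;          /* 8-bit immediate.  */
  unsigned int rot = (insn & 0xf00) >> 7;  /* Twice the rotate field.  */

  if (rot == 0)
    return imm;                            /* Avoid a shift by 32.  */
  return (imm >> rot) | (imm << (32 - rot));
}

/* For example, "sub sp, sp, #1024" is encoded as 0xe24dde40:
   imm8 = 0x40, rotate field = 0xe, so rot = 28 and the decoded value
   is (0x40 >> 28) | (0x40 << 4) = 0x400 = 1024.  */
#endif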
1907 static void
1908 arm_scan_prologue (struct frame_info *this_frame,
1909 struct arm_prologue_cache *cache)
1910 {
1911 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1912 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1913 int regno;
1914 CORE_ADDR prologue_start, prologue_end, current_pc;
1915 CORE_ADDR prev_pc = get_frame_pc (this_frame);
1916 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
1917 pv_t regs[ARM_FPS_REGNUM];
1918 struct pv_area *stack;
1919 struct cleanup *back_to;
1920 CORE_ADDR offset;
1921
1922 /* Assume there is no frame until proven otherwise. */
1923 cache->framereg = ARM_SP_REGNUM;
1924 cache->framesize = 0;
1925
1926 /* Check for Thumb prologue. */
1927 if (arm_frame_is_thumb (this_frame))
1928 {
1929 thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
1930 return;
1931 }
1932
1933 /* Find the function prologue. If we can't find the function in
1934 the symbol table, peek in the stack frame to find the PC. */
1935 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
1936 &prologue_end))
1937 {
1938 /* One way to find the end of the prologue (which works well
1939 for unoptimized code) is to do the following:
1940
1941 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
1942
1943 if (sal.line == 0)
1944 prologue_end = prev_pc;
1945 else if (sal.end < prologue_end)
1946 prologue_end = sal.end;
1947
1948 This mechanism is very accurate so long as the optimizer
1949 doesn't move any instructions from the function body into the
1950 prologue. If this happens, sal.end will be the last
1951 instruction in the first hunk of prologue code just before
1952 the first instruction that the scheduler has moved from
1953 the body to the prologue.
1954
1955 In order to make sure that we scan all of the prologue
1956 instructions, we use a slightly less accurate mechanism which
1957 may scan more than necessary. To help compensate for this
1958 lack of accuracy, the prologue scanning loop below contains
1959 several clauses which'll cause the loop to terminate early if
1960 an implausible prologue instruction is encountered.
1961
1962 The expression
1963
1964 prologue_start + 64
1965
1966 is a suitable endpoint since it accounts for the largest
1967 possible prologue plus up to five instructions inserted by
1968 the scheduler. */
1969
1970 if (prologue_end > prologue_start + 64)
1971 {
1972 prologue_end = prologue_start + 64; /* See above. */
1973 }
1974 }
1975 else
1976 {
1977 /* We have no symbol information. Our only option is to assume this
1978 function has a standard stack frame and the normal frame register.
1979 Then, we can find the value of our frame pointer on entrance to
1980 the callee (or at the present moment if this is the innermost frame).
1981 The value stored there should be the address of the stmfd + 8. */
1982 CORE_ADDR frame_loc;
1983 LONGEST return_value;
1984
1985 frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
1986 if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
1987 return;
1988 else
1989 {
1990 prologue_start = gdbarch_addr_bits_remove
1991 (gdbarch, return_value) - 8;
1992 prologue_end = prologue_start + 64; /* See above. */
1993 }
1994 }
1995
1996 if (prev_pc < prologue_end)
1997 prologue_end = prev_pc;
1998
1999 arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
2000 }
2001
2002 static struct arm_prologue_cache *
2003 arm_make_prologue_cache (struct frame_info *this_frame)
2004 {
2005 int reg;
2006 struct arm_prologue_cache *cache;
2007 CORE_ADDR unwound_fp;
2008
2009 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2010 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2011
2012 arm_scan_prologue (this_frame, cache);
2013
2014 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
2015 if (unwound_fp == 0)
2016 return cache;
2017
2018 cache->prev_sp = unwound_fp + cache->framesize;
2019
2020 /* Calculate actual addresses of saved registers using offsets
2021 determined by arm_scan_prologue. */
2022 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
2023 if (trad_frame_addr_p (cache->saved_regs, reg))
2024 cache->saved_regs[reg].addr += cache->prev_sp;
2025
2026 return cache;
2027 }
2028
2029 /* Our frame ID for a normal frame is the current function's starting PC
2030 and the caller's SP when we were called. */
2031
2032 static void
2033 arm_prologue_this_id (struct frame_info *this_frame,
2034 void **this_cache,
2035 struct frame_id *this_id)
2036 {
2037 struct arm_prologue_cache *cache;
2038 struct frame_id id;
2039 CORE_ADDR pc, func;
2040
2041 if (*this_cache == NULL)
2042 *this_cache = arm_make_prologue_cache (this_frame);
2043 cache = *this_cache;
2044
2045 /* This is meant to halt the backtrace at "_start". */
2046 pc = get_frame_pc (this_frame);
2047 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
2048 return;
2049
2050 /* If we've hit a wall, stop. */
2051 if (cache->prev_sp == 0)
2052 return;
2053
2054 /* Use function start address as part of the frame ID. If we cannot
2055 identify the start address (due to missing symbol information),
2056 fall back to just using the current PC. */
2057 func = get_frame_func (this_frame);
2058 if (!func)
2059 func = pc;
2060
2061 id = frame_id_build (cache->prev_sp, func);
2062 *this_id = id;
2063 }
2064
2065 static struct value *
2066 arm_prologue_prev_register (struct frame_info *this_frame,
2067 void **this_cache,
2068 int prev_regnum)
2069 {
2070 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2071 struct arm_prologue_cache *cache;
2072
2073 if (*this_cache == NULL)
2074 *this_cache = arm_make_prologue_cache (this_frame);
2075 cache = *this_cache;
2076
2077 /* If we are asked to unwind the PC, then we need to return the LR
2078 instead. The prologue may save PC, but it will point into this
2079 frame's prologue, not the next frame's resume location. Also
2080 strip the saved T bit. A valid LR may have the low bit set, but
2081 a valid PC never does. */
2082 if (prev_regnum == ARM_PC_REGNUM)
2083 {
2084 CORE_ADDR lr;
2085
2086 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2087 return frame_unwind_got_constant (this_frame, prev_regnum,
2088 arm_addr_bits_remove (gdbarch, lr));
2089 }
2090
2091 /* SP is generally not saved to the stack, but this frame is
2092 identified by the next frame's stack pointer at the time of the call.
2093 The value was already reconstructed into PREV_SP. */
2094 if (prev_regnum == ARM_SP_REGNUM)
2095 return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);
2096
2097 /* The CPSR may have been changed by the call instruction and by the
2098 called function. The only bit we can reconstruct is the T bit,
2099 by checking the low bit of LR as of the call. This is a reliable
2100 indicator of Thumb-ness except for some ARM v4T pre-interworking
2101 Thumb code, which could get away with a clear low bit as long as
2102 the called function did not use bx. Guess that all other
2103 bits are unchanged; the condition flags are presumably lost,
2104 but the processor status is likely valid. */
2105 if (prev_regnum == ARM_PS_REGNUM)
2106 {
2107 CORE_ADDR lr, cpsr;
2108 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
2109
2110 cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
2111 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
2112 if (IS_THUMB_ADDR (lr))
2113 cpsr |= t_bit;
2114 else
2115 cpsr &= ~t_bit;
2116 return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
2117 }
2118
2119 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
2120 prev_regnum);
2121 }
2122
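/* The ARM_PS_REGNUM case above reconstructs only the T (Thumb) bit of
   the caller's CPSR from the low bit of the saved LR.  A minimal sketch
   of that reconstruction, assuming the A-profile CPSR where the T bit
   is bit 5 (M-profile uses a different bit; arm_psr_thumb_bit selects
   the right one):  */
#if 0
static unsigned int
example_reconstruct_t_bit (unsigned int cpsr, unsigned int lr)
{
  unsigned int t_bit = 1u << 5;	/* CPSR T bit on A-profile.  */

  if (lr & 1)			/* Low bit of LR set => Thumb caller.  */
    return cpsr | t_bit;
  else
    return cpsr & ~t_bit;
}
#endif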
2123 struct frame_unwind arm_prologue_unwind = {
2124 NORMAL_FRAME,
2125 default_frame_unwind_stop_reason,
2126 arm_prologue_this_id,
2127 arm_prologue_prev_register,
2128 NULL,
2129 default_frame_sniffer
2130 };
2131
2132 /* Maintain a list of ARM exception table entries per objfile, similar to the
2133 list of mapping symbols. We only cache entries for standard ARM-defined
2134 personality routines; the cache will contain only the frame unwinding
2135 instructions associated with the entry (not the descriptors). */
2136
2137 static const struct objfile_data *arm_exidx_data_key;
2138
2139 struct arm_exidx_entry
2140 {
2141 bfd_vma addr;
2142 gdb_byte *entry;
2143 };
2144 typedef struct arm_exidx_entry arm_exidx_entry_s;
2145 DEF_VEC_O(arm_exidx_entry_s);
2146
2147 struct arm_exidx_data
2148 {
2149 VEC(arm_exidx_entry_s) **section_maps;
2150 };
2151
2152 static void
2153 arm_exidx_data_free (struct objfile *objfile, void *arg)
2154 {
2155 struct arm_exidx_data *data = arg;
2156 unsigned int i;
2157
2158 for (i = 0; i < objfile->obfd->section_count; i++)
2159 VEC_free (arm_exidx_entry_s, data->section_maps[i]);
2160 }
2161
2162 static inline int
2163 arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
2164 const struct arm_exidx_entry *rhs)
2165 {
2166 return lhs->addr < rhs->addr;
2167 }
2168
2169 static struct obj_section *
2170 arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
2171 {
2172 struct obj_section *osect;
2173
2174 ALL_OBJFILE_OSECTIONS (objfile, osect)
2175 if (bfd_get_section_flags (objfile->obfd,
2176 osect->the_bfd_section) & SEC_ALLOC)
2177 {
2178 bfd_vma start, size;
2179 start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
2180 size = bfd_get_section_size (osect->the_bfd_section);
2181
2182 if (start <= vma && vma < start + size)
2183 return osect;
2184 }
2185
2186 return NULL;
2187 }
2188
2189 /* Parse contents of exception table and exception index sections
2190 of OBJFILE, and fill in the exception table entry cache.
2191
2192 For each entry that refers to a standard ARM-defined personality
2193 routine, extract the frame unwinding instructions (from either
2194 the index or the table section). The unwinding instructions
2195 are normalized by:
2196 - extracting them from the rest of the table data
2197 - converting to host endianness
2198 - appending the implicit 0xb0 ("Finish") code
2199
2200 The extracted and normalized instructions are stored for later
2201 retrieval by the arm_find_exidx_entry routine. */
2202
2203 static void
2204 arm_exidx_new_objfile (struct objfile *objfile)
2205 {
2206 struct cleanup *cleanups;
2207 struct arm_exidx_data *data;
2208 asection *exidx, *extab;
2209 bfd_vma exidx_vma = 0, extab_vma = 0;
2210 bfd_size_type exidx_size = 0, extab_size = 0;
2211 gdb_byte *exidx_data = NULL, *extab_data = NULL;
2212 LONGEST i;
2213
2214 /* If we've already touched this file, do nothing. */
2215 if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
2216 return;
2217 cleanups = make_cleanup (null_cleanup, NULL);
2218
2219 /* Read contents of exception table and index. */
2220 exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
2221 if (exidx)
2222 {
2223 exidx_vma = bfd_section_vma (objfile->obfd, exidx);
2224 exidx_size = bfd_get_section_size (exidx);
2225 exidx_data = xmalloc (exidx_size);
2226 make_cleanup (xfree, exidx_data);
2227
2228 if (!bfd_get_section_contents (objfile->obfd, exidx,
2229 exidx_data, 0, exidx_size))
2230 {
2231 do_cleanups (cleanups);
2232 return;
2233 }
2234 }
2235
2236 extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
2237 if (extab)
2238 {
2239 extab_vma = bfd_section_vma (objfile->obfd, extab);
2240 extab_size = bfd_get_section_size (extab);
2241 extab_data = xmalloc (extab_size);
2242 make_cleanup (xfree, extab_data);
2243
2244 if (!bfd_get_section_contents (objfile->obfd, extab,
2245 extab_data, 0, extab_size))
2246 {
2247 do_cleanups (cleanups);
2248 return;
2249 }
2250 }
2251
2252 /* Allocate exception table data structure. */
2253 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
2254 set_objfile_data (objfile, arm_exidx_data_key, data);
2255 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
2256 objfile->obfd->section_count,
2257 VEC(arm_exidx_entry_s) *);
2258
2259 /* Fill in exception table. */
2260 for (i = 0; i < exidx_size / 8; i++)
2261 {
2262 struct arm_exidx_entry new_exidx_entry;
2263 bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
2264 bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
2265 bfd_vma addr = 0, word = 0;
2266 int n_bytes = 0, n_words = 0;
2267 struct obj_section *sec;
2268 gdb_byte *entry = NULL;
2269
2270 /* Extract address of start of function. */
2271 idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2272 idx += exidx_vma + i * 8;
2273
2274 /* Find section containing function and compute section offset. */
2275 sec = arm_obj_section_from_vma (objfile, idx);
2276 if (sec == NULL)
2277 continue;
2278 idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);
2279
2280 /* Determine address of exception table entry. */
2281 if (val == 1)
2282 {
2283 /* EXIDX_CANTUNWIND -- no exception table entry present. */
2284 }
2285 else if ((val & 0xff000000) == 0x80000000)
2286 {
2287 /* Exception table entry embedded in .ARM.exidx
2288 -- must be short form. */
2289 word = val;
2290 n_bytes = 3;
2291 }
2292 else if (!(val & 0x80000000))
2293 {
2294 /* Exception table entry in .ARM.extab. */
2295 addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2296 addr += exidx_vma + i * 8 + 4;
2297
2298 if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
2299 {
2300 word = bfd_h_get_32 (objfile->obfd,
2301 extab_data + addr - extab_vma);
2302 addr += 4;
2303
2304 if ((word & 0xff000000) == 0x80000000)
2305 {
2306 /* Short form. */
2307 n_bytes = 3;
2308 }
2309 else if ((word & 0xff000000) == 0x81000000
2310 || (word & 0xff000000) == 0x82000000)
2311 {
2312 /* Long form. */
2313 n_bytes = 2;
2314 n_words = ((word >> 16) & 0xff);
2315 }
2316 else if (!(word & 0x80000000))
2317 {
2318 bfd_vma pers;
2319 struct obj_section *pers_sec;
2320 int gnu_personality = 0;
2321
2322 /* Custom personality routine. */
2323 pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
2324 pers = UNMAKE_THUMB_ADDR (pers + addr - 4);
2325
2326 /* Check whether we've got one of the variants of the
2327 GNU personality routines. */
2328 pers_sec = arm_obj_section_from_vma (objfile, pers);
2329 if (pers_sec)
2330 {
2331 static const char *personality[] =
2332 {
2333 "__gcc_personality_v0",
2334 "__gxx_personality_v0",
2335 "__gcj_personality_v0",
2336 "__gnu_objc_personality_v0",
2337 NULL
2338 };
2339
2340 CORE_ADDR pc = pers + obj_section_offset (pers_sec);
2341 int k;
2342
2343 for (k = 0; personality[k]; k++)
2344 if (lookup_minimal_symbol_by_pc_name
2345 (pc, personality[k], objfile))
2346 {
2347 gnu_personality = 1;
2348 break;
2349 }
2350 }
2351
2352 /* If so, the next word contains a word count in the high
2353 byte, followed by the same unwind instructions as the
2354 pre-defined forms. */
2355 if (gnu_personality
2356 && addr + 4 <= extab_vma + extab_size)
2357 {
2358 word = bfd_h_get_32 (objfile->obfd,
2359 extab_data + addr - extab_vma);
2360 addr += 4;
2361 n_bytes = 3;
2362 n_words = ((word >> 24) & 0xff);
2363 }
2364 }
2365 }
2366 }
2367
2368 /* Sanity check address. */
2369 if (n_words)
2370 if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
2371 n_words = n_bytes = 0;
2372
2373 /* The unwind instructions reside in WORD (only the N_BYTES least
2374 significant bytes are valid), followed by N_WORDS words in the
2375 extab section starting at ADDR. */
2376 if (n_bytes || n_words)
2377 {
2378 gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
2379 n_bytes + n_words * 4 + 1);
2380
2381 while (n_bytes--)
2382 *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);
2383
2384 while (n_words--)
2385 {
2386 word = bfd_h_get_32 (objfile->obfd,
2387 extab_data + addr - extab_vma);
2388 addr += 4;
2389
2390 *p++ = (gdb_byte) ((word >> 24) & 0xff);
2391 *p++ = (gdb_byte) ((word >> 16) & 0xff);
2392 *p++ = (gdb_byte) ((word >> 8) & 0xff);
2393 *p++ = (gdb_byte) (word & 0xff);
2394 }
2395
2396 /* Implied "Finish" to terminate the list. */
2397 *p++ = 0xb0;
2398 }
2399
2400 /* Push the entry onto the vector. Entries are guaranteed to
2401 always appear in order of increasing addresses. */
2402 new_exidx_entry.addr = idx;
2403 new_exidx_entry.entry = entry;
2404 VEC_safe_push (arm_exidx_entry_s,
2405 data->section_maps[sec->the_bfd_section->index],
2406 &new_exidx_entry);
2407 }
2408
2409 do_cleanups (cleanups);
2410 }
2411
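/* Both the function-address and table-entry fields of an .ARM.exidx
   record are 31-bit place-relative signed offsets ("prel31").  The
   expression used above to decode them can be illustrated by this
   standalone sketch (names invented for illustration):  */
#if 0
/* Sign-extend the low 31 bits of VAL and relocate the result against
   PLACE, the address of the word VAL was read from.  */
static unsigned long
example_decode_prel31 (unsigned long val, unsigned long place)
{
  long offset = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;

  return place + offset;
}

/* For example, a first word of 0x7ffffff8 read from .ARM.exidx address
   0x10000 decodes to the offset -8, i.e. a function starting at 0xfff8.  */
#endif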
2412 /* Search for the exception table entry covering MEMADDR. If one is found,
2413 return a pointer to its data. Otherwise, return 0. If START is non-NULL,
2414 set *START to the start of the region covered by this entry. */
2415
2416 static gdb_byte *
2417 arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
2418 {
2419 struct obj_section *sec;
2420
2421 sec = find_pc_section (memaddr);
2422 if (sec != NULL)
2423 {
2424 struct arm_exidx_data *data;
2425 VEC(arm_exidx_entry_s) *map;
2426 struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
2427 unsigned int idx;
2428
2429 data = objfile_data (sec->objfile, arm_exidx_data_key);
2430 if (data != NULL)
2431 {
2432 map = data->section_maps[sec->the_bfd_section->index];
2433 if (!VEC_empty (arm_exidx_entry_s, map))
2434 {
2435 struct arm_exidx_entry *map_sym;
2436
2437 idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
2438 arm_compare_exidx_entries);
2439
2440 /* VEC_lower_bound finds the earliest ordered insertion
2441 point. If the following symbol starts at this exact
2442 address, we use that; otherwise, the preceding
2443 exception table entry covers this address. */
2444 if (idx < VEC_length (arm_exidx_entry_s, map))
2445 {
2446 map_sym = VEC_index (arm_exidx_entry_s, map, idx);
2447 if (map_sym->addr == map_key.addr)
2448 {
2449 if (start)
2450 *start = map_sym->addr + obj_section_addr (sec);
2451 return map_sym->entry;
2452 }
2453 }
2454
2455 if (idx > 0)
2456 {
2457 map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
2458 if (start)
2459 *start = map_sym->addr + obj_section_addr (sec);
2460 return map_sym->entry;
2461 }
2462 }
2463 }
2464 }
2465
2466 return NULL;
2467 }
2468
2469 /* Given the current frame THIS_FRAME, and its associated frame unwinding
2470 instruction list from the ARM exception table entry ENTRY, allocate and
2471 return a prologue cache structure describing how to unwind this frame.
2472
2473 Return NULL if the unwinding instruction list contains a "spare",
2474 "reserved" or "refuse to unwind" instruction as defined in section
2475 "9.3 Frame unwinding instructions" of the "Exception Handling ABI
2476 for the ARM Architecture" document. */
2477
2478 static struct arm_prologue_cache *
2479 arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
2480 {
2481 CORE_ADDR vsp = 0;
2482 int vsp_valid = 0;
2483
2484 struct arm_prologue_cache *cache;
2485 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2486 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2487
2488 for (;;)
2489 {
2490 gdb_byte insn;
2491
2492 /* Whenever we reload SP, we have to retrieve its actual
2493 value in the current frame. */
2494 if (!vsp_valid)
2495 {
2496 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2497 {
2498 int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2499 vsp = get_frame_register_unsigned (this_frame, reg);
2500 }
2501 else
2502 {
2503 CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
2504 vsp = get_frame_memory_unsigned (this_frame, addr, 4);
2505 }
2506
2507 vsp_valid = 1;
2508 }
2509
2510 /* Decode next unwind instruction. */
2511 insn = *entry++;
2512
2513 if ((insn & 0xc0) == 0)
2514 {
2515 int offset = insn & 0x3f;
2516 vsp += (offset << 2) + 4;
2517 }
2518 else if ((insn & 0xc0) == 0x40)
2519 {
2520 int offset = insn & 0x3f;
2521 vsp -= (offset << 2) + 4;
2522 }
2523 else if ((insn & 0xf0) == 0x80)
2524 {
2525 int mask = ((insn & 0xf) << 8) | *entry++;
2526 int i;
2527
2528 /* The special case of an all-zero mask identifies
2529 "Refuse to unwind". We return NULL to fall back
2530 to the prologue analyzer. */
2531 if (mask == 0)
2532 return NULL;
2533
2534 /* Pop registers r4..r15 under mask. */
2535 for (i = 0; i < 12; i++)
2536 if (mask & (1 << i))
2537 {
2538 cache->saved_regs[4 + i].addr = vsp;
2539 vsp += 4;
2540 }
2541
2542 /* Special-case popping SP -- we need to reload vsp. */
2543 if (mask & (1 << (ARM_SP_REGNUM - 4)))
2544 vsp_valid = 0;
2545 }
2546 else if ((insn & 0xf0) == 0x90)
2547 {
2548 int reg = insn & 0xf;
2549
2550 /* Reserved cases. */
2551 if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
2552 return NULL;
2553
2554 /* Set SP from another register and mark VSP for reload. */
2555 cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
2556 vsp_valid = 0;
2557 }
2558 else if ((insn & 0xf0) == 0xa0)
2559 {
2560 int count = insn & 0x7;
2561 int pop_lr = (insn & 0x8) != 0;
2562 int i;
2563
2564 /* Pop r4..r[4+count]. */
2565 for (i = 0; i <= count; i++)
2566 {
2567 cache->saved_regs[4 + i].addr = vsp;
2568 vsp += 4;
2569 }
2570
2571 /* If indicated by flag, pop LR as well. */
2572 if (pop_lr)
2573 {
2574 cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
2575 vsp += 4;
2576 }
2577 }
2578 else if (insn == 0xb0)
2579 {
2580 /* We could only have updated PC by popping into it; if so, it
2581 will show up as an address. Otherwise, copy LR into PC. */
2582 if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
2583 cache->saved_regs[ARM_PC_REGNUM]
2584 = cache->saved_regs[ARM_LR_REGNUM];
2585
2586 /* We're done. */
2587 break;
2588 }
2589 else if (insn == 0xb1)
2590 {
2591 int mask = *entry++;
2592 int i;
2593
2594 /* An all-zero mask, or a mask >= 16, is "spare". */
2595 if (mask == 0 || mask >= 16)
2596 return NULL;
2597
2598 /* Pop r0..r3 under mask. */
2599 for (i = 0; i < 4; i++)
2600 if (mask & (1 << i))
2601 {
2602 cache->saved_regs[i].addr = vsp;
2603 vsp += 4;
2604 }
2605 }
2606 else if (insn == 0xb2)
2607 {
2608 ULONGEST offset = 0;
2609 unsigned shift = 0;
2610
2611 do
2612 {
2613 offset |= (*entry & 0x7f) << shift;
2614 shift += 7;
2615 }
2616 while (*entry++ & 0x80);
2617
2618 vsp += 0x204 + (offset << 2);
2619 }
2620 else if (insn == 0xb3)
2621 {
2622 int start = *entry >> 4;
2623 int count = (*entry++) & 0xf;
2624 int i;
2625
2626 /* Only registers D0..D15 are valid here. */
2627 if (start + count >= 16)
2628 return NULL;
2629
2630 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2631 for (i = 0; i <= count; i++)
2632 {
2633 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2634 vsp += 8;
2635 }
2636
2637 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2638 vsp += 4;
2639 }
2640 else if ((insn & 0xf8) == 0xb8)
2641 {
2642 int count = insn & 0x7;
2643 int i;
2644
2645 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2646 for (i = 0; i <= count; i++)
2647 {
2648 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2649 vsp += 8;
2650 }
2651
2652 /* Add an extra 4 bytes for FSTMFDX-style stack. */
2653 vsp += 4;
2654 }
2655 else if (insn == 0xc6)
2656 {
2657 int start = *entry >> 4;
2658 int count = (*entry++) & 0xf;
2659 int i;
2660
2661 /* Only registers WR0..WR15 are valid. */
2662 if (start + count >= 16)
2663 return NULL;
2664
2665 /* Pop iwmmx registers WR[start]..WR[start+count]. */
2666 for (i = 0; i <= count; i++)
2667 {
2668 cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
2669 vsp += 8;
2670 }
2671 }
2672 else if (insn == 0xc7)
2673 {
2674 int mask = *entry++;
2675 int i;
2676
2677 /* An all-zero mask, or a mask >= 16, is "spare". */
2678 if (mask == 0 || mask >= 16)
2679 return NULL;
2680
2681 /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask. */
2682 for (i = 0; i < 4; i++)
2683 if (mask & (1 << i))
2684 {
2685 cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
2686 vsp += 4;
2687 }
2688 }
2689 else if ((insn & 0xf8) == 0xc0)
2690 {
2691 int count = insn & 0x7;
2692 int i;
2693
2694 /* Pop iwmmx registers WR[10]..WR[10+count]. */
2695 for (i = 0; i <= count; i++)
2696 {
2697 cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
2698 vsp += 8;
2699 }
2700 }
2701 else if (insn == 0xc8)
2702 {
2703 int start = *entry >> 4;
2704 int count = (*entry++) & 0xf;
2705 int i;
2706
2707 /* Only registers D16..D31 are valid here. */
2708 if (start + count >= 16)
2709 return NULL;
2710
2711 /* Pop VFP double-precision registers
2712 D[16+start]..D[16+start+count]. */
2713 for (i = 0; i <= count; i++)
2714 {
2715 cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
2716 vsp += 8;
2717 }
2718 }
2719 else if (insn == 0xc9)
2720 {
2721 int start = *entry >> 4;
2722 int count = (*entry++) & 0xf;
2723 int i;
2724
2725 /* Pop VFP double-precision registers D[start]..D[start+count]. */
2726 for (i = 0; i <= count; i++)
2727 {
2728 cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
2729 vsp += 8;
2730 }
2731 }
2732 else if ((insn & 0xf8) == 0xd0)
2733 {
2734 int count = insn & 0x7;
2735 int i;
2736
2737 /* Pop VFP double-precision registers D[8]..D[8+count]. */
2738 for (i = 0; i <= count; i++)
2739 {
2740 cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
2741 vsp += 8;
2742 }
2743 }
2744 else
2745 {
2746 /* Everything else is "spare". */
2747 return NULL;
2748 }
2749 }
2750
2751 /* If we restore SP from a register, assume this was the frame register.
2752 Otherwise just fall back to SP as frame register. */
2753 if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
2754 cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
2755 else
2756 cache->framereg = ARM_SP_REGNUM;
2757
2758 /* Determine offset to previous frame. */
2759 cache->framesize
2760 = vsp - get_frame_register_unsigned (this_frame, cache->framereg);
2761
2762 /* We already got the previous SP. */
2763 cache->prev_sp = vsp;
2764
2765 return cache;
2766 }
2767
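/* As a concrete illustration of the decoding loop above, consider a
   hypothetical normalized entry for a function whose prologue was
   "push {r4, lr}; sub sp, sp, #8":  */
#if 0
static const gdb_byte example_unwind_entry[] =
{
  0x01,		/* vsp += (1 << 2) + 4 = 8; undoes "sub sp, sp, #8".  */
  0xa8,		/* Pop r4 and LR; undoes "push {r4, lr}".  */
  0xb0,		/* Finish; PC was not popped, so it is copied from LR.  */
};

/* Fed to arm_exidx_fill_cache, this places r4 at SP + 8 and LR at
   SP + 12, and sets prev_sp to SP + 16, where SP is this frame's
   stack pointer.  */
#endif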
2768 /* Unwinding via ARM exception table entries. Note that the sniffer
2769 already computes a filled-in prologue cache, which is then used
2770 with the same arm_prologue_this_id and arm_prologue_prev_register
2771 routines also used for prologue-parsing based unwinding. */
2772
2773 static int
2774 arm_exidx_unwind_sniffer (const struct frame_unwind *self,
2775 struct frame_info *this_frame,
2776 void **this_prologue_cache)
2777 {
2778 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2779 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2780 CORE_ADDR addr_in_block, exidx_region, func_start;
2781 struct arm_prologue_cache *cache;
2782 gdb_byte *entry;
2783
2784 /* See if we have an ARM exception table entry covering this address. */
2785 addr_in_block = get_frame_address_in_block (this_frame);
2786 entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
2787 if (!entry)
2788 return 0;
2789
2790 /* The ARM exception table does not describe unwind information
2791 for arbitrary PC values, but is guaranteed to be correct only
2792 at call sites. We have to decide here whether we want to use
2793 ARM exception table information for this frame, or fall back
2794 to using prologue parsing. (Note that if we have DWARF CFI,
2795 this sniffer isn't even called -- CFI is always preferred.)
2796
2797 Before we make this decision, however, we check whether we
2798 actually have *symbol* information for the current frame.
2799 If not, prologue parsing would not work anyway, so we might
2800 as well use the exception table and hope for the best. */
2801 if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
2802 {
2803 int exc_valid = 0;
2804
2805 /* If the next frame is "normal", we are at a call site in this
2806 frame, so exception information is guaranteed to be valid. */
2807 if (get_next_frame (this_frame)
2808 && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
2809 exc_valid = 1;
2810
2811 /* We also assume exception information is valid if we're currently
2812 blocked in a system call. The system library is supposed to
2813 ensure this, so that e.g. pthread cancellation works. */
2814 if (arm_frame_is_thumb (this_frame))
2815 {
2816 LONGEST insn;
2817
2818 if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
2819 byte_order_for_code, &insn)
2820 && (insn & 0xff00) == 0xdf00 /* svc */)
2821 exc_valid = 1;
2822 }
2823 else
2824 {
2825 LONGEST insn;
2826
2827 if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
2828 byte_order_for_code, &insn)
2829 && (insn & 0x0f000000) == 0x0f000000 /* svc */)
2830 exc_valid = 1;
2831 }
2832
2833 /* Bail out if we don't know that exception information is valid. */
2834 if (!exc_valid)
2835 return 0;
2836
2837 /* The ARM exception index does not mark the *end* of the region
2838 covered by the entry, and some functions will not have any entry.
2839 To correctly recognize the end of the covered region, the linker
2840 should have inserted dummy records with a CANTUNWIND marker.
2841
2842 Unfortunately, current versions of GNU ld do not reliably do
2843 this, and thus we may have found an incorrect entry above.
2844 As a (temporary) sanity check, we only use the entry if it
2845 lies *within* the bounds of the function. Note that this check
2846 might reject perfectly valid entries that just happen to cover
2847 multiple functions; therefore this check ought to be removed
2848 once the linker is fixed. */
2849 if (func_start > exidx_region)
2850 return 0;
2851 }
2852
2853 /* Decode the list of unwinding instructions into a prologue cache.
2854 Note that this may fail due to e.g. a "refuse to unwind" code. */
2855 cache = arm_exidx_fill_cache (this_frame, entry);
2856 if (!cache)
2857 return 0;
2858
2859 *this_prologue_cache = cache;
2860 return 1;
2861 }
2862
2863 struct frame_unwind arm_exidx_unwind = {
2864 NORMAL_FRAME,
2865 default_frame_unwind_stop_reason,
2866 arm_prologue_this_id,
2867 arm_prologue_prev_register,
2868 NULL,
2869 arm_exidx_unwind_sniffer
2870 };
2871
2872 static struct arm_prologue_cache *
2873 arm_make_stub_cache (struct frame_info *this_frame)
2874 {
2875 struct arm_prologue_cache *cache;
2876
2877 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2878 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2879
2880 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM);
2881
2882 return cache;
2883 }
2884
2885 /* Our frame ID for a stub frame is the current SP and PC. */
2886
2887 static void
2888 arm_stub_this_id (struct frame_info *this_frame,
2889 void **this_cache,
2890 struct frame_id *this_id)
2891 {
2892 struct arm_prologue_cache *cache;
2893
2894 if (*this_cache == NULL)
2895 *this_cache = arm_make_stub_cache (this_frame);
2896 cache = *this_cache;
2897
2898 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
2899 }
2900
2901 static int
2902 arm_stub_unwind_sniffer (const struct frame_unwind *self,
2903 struct frame_info *this_frame,
2904 void **this_prologue_cache)
2905 {
2906 CORE_ADDR addr_in_block;
2907 gdb_byte dummy[4];
2908
2909 addr_in_block = get_frame_address_in_block (this_frame);
2910 if (in_plt_section (addr_in_block)
2911 /* We also use the stub unwinder if the target memory is unreadable,
2912 to avoid having the prologue unwinder try to read it. */
2913 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
2914 return 1;
2915
2916 return 0;
2917 }
2918
2919 struct frame_unwind arm_stub_unwind = {
2920 NORMAL_FRAME,
2921 default_frame_unwind_stop_reason,
2922 arm_stub_this_id,
2923 arm_prologue_prev_register,
2924 NULL,
2925 arm_stub_unwind_sniffer
2926 };
2927
2928 /* Store, into CACHE->saved_regs, the addresses of the registers
2929 saved by the frame described by THIS_FRAME. CACHE is
2930 returned. */
2931
2932 static struct arm_prologue_cache *
2933 arm_m_exception_cache (struct frame_info *this_frame)
2934 {
2935 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2937 struct arm_prologue_cache *cache;
2938 CORE_ADDR unwound_sp;
2939 LONGEST xpsr;
2940
2941 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
2942 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
2943
2944 unwound_sp = get_frame_register_unsigned (this_frame,
2945 ARM_SP_REGNUM);
2946
2947 /* The hardware saves eight 32-bit words, comprising xPSR,
2948 ReturnAddress, LR (R14), R12, R3, R2, R1, R0. See details in
2949 "B1.5.6 Exception entry behavior" in
2950 "ARMv7-M Architecture Reference Manual". */
2951 cache->saved_regs[0].addr = unwound_sp;
2952 cache->saved_regs[1].addr = unwound_sp + 4;
2953 cache->saved_regs[2].addr = unwound_sp + 8;
2954 cache->saved_regs[3].addr = unwound_sp + 12;
2955 cache->saved_regs[12].addr = unwound_sp + 16;
2956 cache->saved_regs[14].addr = unwound_sp + 20;
2957 cache->saved_regs[15].addr = unwound_sp + 24;
2958 cache->saved_regs[ARM_PS_REGNUM].addr = unwound_sp + 28;
2959
2960 /* If bit 9 of the saved xPSR is set, then there is a four-byte
2961 aligner between the top of the 32-byte stack frame and the
2962 previous context's stack pointer. */
2963 cache->prev_sp = unwound_sp + 32;
2964 if (safe_read_memory_integer (unwound_sp + 28, 4, byte_order, &xpsr)
2965 && (xpsr & (1 << 9)) != 0)
2966 cache->prev_sp += 4;
2967
2968 return cache;
2969 }
2970
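/* For illustration, the eight-word frame that arm_m_exception_cache
   describes has the following layout at the unwound SP (lowest address
   first); this is a sketch, not a structure used by GDB:  */
#if 0
struct example_m_exception_frame
{
  unsigned int r0, r1, r2, r3;	/* Offsets 0, 4, 8, 12.  */
  unsigned int r12;		/* Offset 16.  */
  unsigned int lr;		/* Offset 20.  */
  unsigned int return_address;	/* Offset 24.  */
  unsigned int xpsr;		/* Offset 28; bit 9 set means a 4-byte
				   aligner sits above this frame.  */
};
#endif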
2971 /* Implementation of function hook 'this_id' in
2972 'struct frame_unwind'. */
2973
2974 static void
2975 arm_m_exception_this_id (struct frame_info *this_frame,
2976 void **this_cache,
2977 struct frame_id *this_id)
2978 {
2979 struct arm_prologue_cache *cache;
2980
2981 if (*this_cache == NULL)
2982 *this_cache = arm_m_exception_cache (this_frame);
2983 cache = *this_cache;
2984
2985 /* Our frame ID for an M-profile exception frame is the previous SP and the current PC. */
2986 *this_id = frame_id_build (cache->prev_sp,
2987 get_frame_pc (this_frame));
2988 }
2989
2990 /* Implementation of function hook 'prev_register' in
2991 'struct frame_unwind'. */
2992
2993 static struct value *
2994 arm_m_exception_prev_register (struct frame_info *this_frame,
2995 void **this_cache,
2996 int prev_regnum)
2997 {
2998 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2999 struct arm_prologue_cache *cache;
3000
3001 if (*this_cache == NULL)
3002 *this_cache = arm_m_exception_cache (this_frame);
3003 cache = *this_cache;
3004
3005 /* The value was already reconstructed into PREV_SP. */
3006 if (prev_regnum == ARM_SP_REGNUM)
3007 return frame_unwind_got_constant (this_frame, prev_regnum,
3008 cache->prev_sp);
3009
3010 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
3011 prev_regnum);
3012 }
3013
3014 /* Implementation of function hook 'sniffer' in
3015 'struct frame_unwind'. */
3016
3017 static int
3018 arm_m_exception_unwind_sniffer (const struct frame_unwind *self,
3019 struct frame_info *this_frame,
3020 void **this_prologue_cache)
3021 {
3022 CORE_ADDR this_pc = get_frame_pc (this_frame);
3023
3024 /* No need to check is_m; this sniffer is only registered for
3025 M-profile architectures. */
3026
3027 /* Exception frames return to one of these magic PCs. Other values
3028 are not defined as of v7-M. See details in "B1.5.8 Exception
3029 return behavior" in "ARMv7-M Architecture Reference Manual". */
3030 if (this_pc == 0xfffffff1 || this_pc == 0xfffffff9
3031 || this_pc == 0xfffffffd)
3032 return 1;
3033
3034 return 0;
3035 }
3036
3037 /* Frame unwinder for M-profile exceptions. */
3038
3039 struct frame_unwind arm_m_exception_unwind =
3040 {
3041 SIGTRAMP_FRAME,
3042 default_frame_unwind_stop_reason,
3043 arm_m_exception_this_id,
3044 arm_m_exception_prev_register,
3045 NULL,
3046 arm_m_exception_unwind_sniffer
3047 };
3048
3049 static CORE_ADDR
3050 arm_normal_frame_base (struct frame_info *this_frame, void **this_cache)
3051 {
3052 struct arm_prologue_cache *cache;
3053
3054 if (*this_cache == NULL)
3055 *this_cache = arm_make_prologue_cache (this_frame);
3056 cache = *this_cache;
3057
3058 return cache->prev_sp - cache->framesize;
3059 }
3060
3061 struct frame_base arm_normal_base = {
3062 &arm_prologue_unwind,
3063 arm_normal_frame_base,
3064 arm_normal_frame_base,
3065 arm_normal_frame_base
3066 };
3067
3068 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
3069 dummy frame. The frame ID's base needs to match the TOS value
3070 saved by save_dummy_frame_tos() and returned from
3071 arm_push_dummy_call, and the PC needs to match the dummy frame's
3072 breakpoint. */
3073
3074 static struct frame_id
3075 arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3076 {
3077 return frame_id_build (get_frame_register_unsigned (this_frame,
3078 ARM_SP_REGNUM),
3079 get_frame_pc (this_frame));
3080 }
3081
3082 /* Given THIS_FRAME, find the previous frame's resume PC (which will
3083 be used to construct the previous frame's ID, after looking up the
3084 containing function). */
3085
3086 static CORE_ADDR
3087 arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
3088 {
3089 CORE_ADDR pc;
3090 pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
3091 return arm_addr_bits_remove (gdbarch, pc);
3092 }
3093
3094 static CORE_ADDR
3095 arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
3096 {
3097 return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
3098 }
3099
3100 static struct value *
3101 arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
3102 int regnum)
3103 {
3104 struct gdbarch * gdbarch = get_frame_arch (this_frame);
3105 CORE_ADDR lr, cpsr;
3106 ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);
3107
3108 switch (regnum)
3109 {
3110 case ARM_PC_REGNUM:
3111 /* The PC is normally copied from the return column, which
3112 describes saves of LR. However, that version may have an
3113 extra bit set to indicate Thumb state. The bit is not
3114 part of the PC. */
3115 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3116 return frame_unwind_got_constant (this_frame, regnum,
3117 arm_addr_bits_remove (gdbarch, lr));
3118
3119 case ARM_PS_REGNUM:
3120 /* Reconstruct the T bit; see arm_prologue_prev_register for details. */
3121 cpsr = get_frame_register_unsigned (this_frame, regnum);
3122 lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
3123 if (IS_THUMB_ADDR (lr))
3124 cpsr |= t_bit;
3125 else
3126 cpsr &= ~t_bit;
3127 return frame_unwind_got_constant (this_frame, regnum, cpsr);
3128
3129 default:
3130 internal_error (__FILE__, __LINE__,
3131 _("Unexpected register %d"), regnum);
3132 }
3133 }
3134
3135 static void
3136 arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
3137 struct dwarf2_frame_state_reg *reg,
3138 struct frame_info *this_frame)
3139 {
3140 switch (regnum)
3141 {
3142 case ARM_PC_REGNUM:
3143 case ARM_PS_REGNUM:
3144 reg->how = DWARF2_FRAME_REG_FN;
3145 reg->loc.fn = arm_dwarf2_prev_register;
3146 break;
3147 case ARM_SP_REGNUM:
3148 reg->how = DWARF2_FRAME_REG_CFA;
3149 break;
3150 }
3151 }
3152
3153 /* Return true if we are in the function's epilogue, i.e. after the
3154 instruction that destroyed the function's stack frame. */
3155
3156 static int
3157 thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3158 {
3159 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3160 unsigned int insn, insn2;
3161 int found_return = 0, found_stack_adjust = 0;
3162 CORE_ADDR func_start, func_end;
3163 CORE_ADDR scan_pc;
3164 gdb_byte buf[4];
3165
3166 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3167 return 0;
3168
3169 /* The epilogue is a sequence of instructions along the following lines:
3170
3171 - add stack frame size to SP or FP
3172 - [if frame pointer used] restore SP from FP
3173 - restore registers from SP [may include PC]
3174 - a return-type instruction [if PC wasn't already restored]
3175
3176 In a first pass, we scan forward from the current PC and verify the
3177 instructions we find as compatible with this sequence, ending in a
3178 return instruction.
3179
3180 However, this is not sufficient to distinguish indirect function calls
3181 within a function from indirect tail calls in the epilogue in some cases.
3182 Therefore, if we didn't already find any SP-changing instruction during
3183 forward scan, we add a backward scanning heuristic to ensure we actually
3184 are in the epilogue. */
3185
3186 scan_pc = pc;
3187 while (scan_pc < func_end && !found_return)
3188 {
3189 if (target_read_memory (scan_pc, buf, 2))
3190 break;
3191
3192 scan_pc += 2;
3193 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3194
3195 if ((insn & 0xff80) == 0x4700) /* bx <Rm> */
3196 found_return = 1;
3197 else if (insn == 0x46f7) /* mov pc, lr */
3198 found_return = 1;
3199 else if (insn == 0x46bd) /* mov sp, r7 */
3200 found_stack_adjust = 1;
3201 else if ((insn & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3202 found_stack_adjust = 1;
3203 else if ((insn & 0xfe00) == 0xbc00) /* pop <registers> */
3204 {
3205 found_stack_adjust = 1;
3206 if (insn & 0x0100) /* <registers> include PC. */
3207 found_return = 1;
3208 }
3209 else if (thumb_insn_size (insn) == 4) /* 32-bit Thumb-2 instruction */
3210 {
3211 if (target_read_memory (scan_pc, buf, 2))
3212 break;
3213
3214 scan_pc += 2;
3215 insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);
3216
3217 if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3218 {
3219 found_stack_adjust = 1;
3220 if (insn2 & 0x8000) /* <registers> include PC. */
3221 found_return = 1;
3222 }
3223 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3224 && (insn2 & 0x0fff) == 0x0b04)
3225 {
3226 found_stack_adjust = 1;
3227 if ((insn2 & 0xf000) == 0xf000) /* <Rt> is PC. */
3228 found_return = 1;
3229 }
3230 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3231 && (insn2 & 0x0e00) == 0x0a00)
3232 found_stack_adjust = 1;
3233 else
3234 break;
3235 }
3236 else
3237 break;
3238 }
3239
3240 if (!found_return)
3241 return 0;
3242
3243 /* Since any instruction in the epilogue sequence, with the possible
3244 exception of return itself, updates the stack pointer, we need to
3245 scan backwards for at most one instruction. Try either a 16-bit or
3246 a 32-bit instruction. This is just a heuristic, so we do not worry
3247 too much about false positives. */
3248
3249 if (!found_stack_adjust)
3250 {
3251 if (pc - 4 < func_start)
3252 return 0;
3253 if (target_read_memory (pc - 4, buf, 4))
3254 return 0;
3255
3256 insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
3257 insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);
3258
3259 if (insn2 == 0x46bd) /* mov sp, r7 */
3260 found_stack_adjust = 1;
3261 else if ((insn2 & 0xff00) == 0xb000) /* add sp, imm or sub sp, imm */
3262 found_stack_adjust = 1;
3263 else if ((insn2 & 0xff00) == 0xbc00) /* pop <registers> without PC */
3264 found_stack_adjust = 1;
3265 else if (insn == 0xe8bd) /* ldm.w sp!, <registers> */
3266 found_stack_adjust = 1;
3267 else if (insn == 0xf85d /* ldr.w <Rt>, [sp], #4 */
3268 && (insn2 & 0x0fff) == 0x0b04)
3269 found_stack_adjust = 1;
3270 else if ((insn & 0xffbf) == 0xecbd /* vldm sp!, <list> */
3271 && (insn2 & 0x0e00) == 0x0a00)
3272 found_stack_adjust = 1;
3273 }
3274
3275 return found_stack_adjust;
3276 }
3277
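/* For illustration, a typical Thumb epilogue that the forward scan
   above accepts (the encodings shown are the ones the scan matches;
   this is an example, not an exhaustive pattern):

     0xb002  add sp, #8        matched by (insn & 0xff00) == 0xb000
     0xbd90  pop {r4, r7, pc}  matched by (insn & 0xfe00) == 0xbc00,
                               with bit 8 set, so <registers> include
                               PC and this is also the return.  */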
3278 /* Return true if we are in the function's epilogue, i.e. after the
3279 instruction that destroyed the function's stack frame. */
3280
3281 static int
3282 arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
3283 {
3284 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3285 unsigned int insn;
3286 int found_return, found_stack_adjust;
3287 CORE_ADDR func_start, func_end;
3288
3289 if (arm_pc_is_thumb (gdbarch, pc))
3290 return thumb_in_function_epilogue_p (gdbarch, pc);
3291
3292 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
3293 return 0;
3294
3295 /* We are in the epilogue if the previous instruction was a stack
3296 adjustment and the next instruction is a possible return (bx, mov
3297 pc, or pop). We could have to scan backwards to find the stack
3298 adjustment, or forwards to find the return, but this is a decent
3299 approximation. First scan forwards. */
3300
3301 found_return = 0;
3302 insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
3303 if (bits (insn, 28, 31) != INST_NV)
3304 {
3305 if ((insn & 0x0ffffff0) == 0x012fff10)
3306 /* BX. */
3307 found_return = 1;
3308 else if ((insn & 0x0ffffff0) == 0x01a0f000)
3309 /* MOV PC. */
3310 found_return = 1;
3311 else if ((insn & 0x0fff0000) == 0x08bd0000
3312 && (insn & 0x0000c000) != 0)
3313 /* POP (LDMIA), including PC or LR. */
3314 found_return = 1;
3315 }
3316
3317 if (!found_return)
3318 return 0;
3319
3320 /* Scan backwards. This is just a heuristic, so do not worry about
3321 false positives from mode changes. */
3322
3323 if (pc < func_start + 4)
3324 return 0;
3325
3326 found_stack_adjust = 0;
3327 insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
3328 if (bits (insn, 28, 31) != INST_NV)
3329 {
3330 if ((insn & 0x0df0f000) == 0x0080d000)
3331 /* ADD SP (register or immediate). */
3332 found_stack_adjust = 1;
3333 else if ((insn & 0x0df0f000) == 0x0040d000)
3334 /* SUB SP (register or immediate). */
3335 found_stack_adjust = 1;
3336 else if ((insn & 0x0ffffff0) == 0x01a0d000)
3337 /* MOV SP. */
3338 found_stack_adjust = 1;
3339 else if ((insn & 0x0fff0000) == 0x08bd0000)
3340 /* POP (LDMIA). */
3341 found_stack_adjust = 1;
3342 else if ((insn & 0x0fff0000) == 0x049d0000)
3343 /* POP of a single register. */
3344 found_stack_adjust = 1;
3345 }
3346
3347 if (found_stack_adjust)
3348 return 1;
3349
3350 return 0;
3351 }
3352
3353
3354 /* When arguments must be pushed onto the stack, they go on in reverse
3355 order. The code below implements a FILO (stack) to do this. */
3356
3357 struct stack_item
3358 {
3359 int len;
3360 struct stack_item *prev;
3361 void *data;
3362 };
3363
3364 static struct stack_item *
3365 push_stack_item (struct stack_item *prev, const void *contents, int len)
3366 {
3367 struct stack_item *si;
3368 si = xmalloc (sizeof (struct stack_item));
3369 si->data = xmalloc (len);
3370 si->len = len;
3371 si->prev = prev;
3372 memcpy (si->data, contents, len);
3373 return si;
3374 }
3375
3376 static struct stack_item *
3377 pop_stack_item (struct stack_item *si)
3378 {
3379 struct stack_item *dead = si;
3380 si = si->prev;
3381 xfree (dead->data);
3382 xfree (dead);
3383 return si;
3384 }
3385
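/* A minimal usage sketch for the stack_item helpers above (not GDB
   code; arm_push_dummy_call below uses them in the same way for
   arguments that overflow onto the stack):  */
#if 0
static void
example_stack_item_usage (void)
{
  int a = 1, b = 2;
  struct stack_item *si = NULL;

  si = push_stack_item (si, &a, sizeof (a));	/* List: a.  */
  si = push_stack_item (si, &b, sizeof (b));	/* List: b -> a.  */

  /* Items come back in the reverse order of pushing.  */
  si = pop_stack_item (si);			/* Frees b; list: a.  */
  si = pop_stack_item (si);			/* Frees a; list empty.  */
}
#endif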
3386
3387 /* Return the alignment (in bytes) of the given type. */
3388
3389 static int
3390 arm_type_align (struct type *t)
3391 {
3392 int n;
3393 int align;
3394 int falign;
3395
3396 t = check_typedef (t);
3397 switch (TYPE_CODE (t))
3398 {
3399 default:
3400 /* Should never happen. */
3401 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
3402 return 4;
3403
3404 case TYPE_CODE_PTR:
3405 case TYPE_CODE_ENUM:
3406 case TYPE_CODE_INT:
3407 case TYPE_CODE_FLT:
3408 case TYPE_CODE_SET:
3409 case TYPE_CODE_RANGE:
3410 case TYPE_CODE_REF:
3411 case TYPE_CODE_CHAR:
3412 case TYPE_CODE_BOOL:
3413 return TYPE_LENGTH (t);
3414
3415 case TYPE_CODE_ARRAY:
3416 case TYPE_CODE_COMPLEX:
3417 /* TODO: What about vector types? */
3418 return arm_type_align (TYPE_TARGET_TYPE (t));
3419
3420 case TYPE_CODE_STRUCT:
3421 case TYPE_CODE_UNION:
3422 align = 1;
3423 for (n = 0; n < TYPE_NFIELDS (t); n++)
3424 {
3425 falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
3426 if (falign > align)
3427 align = falign;
3428 }
3429 return align;
3430 }
3431 }
3432
3433 /* Possible base types for a candidate for passing and returning in
3434 VFP registers. */
3435
3436 enum arm_vfp_cprc_base_type
3437 {
3438 VFP_CPRC_UNKNOWN,
3439 VFP_CPRC_SINGLE,
3440 VFP_CPRC_DOUBLE,
3441 VFP_CPRC_VEC64,
3442 VFP_CPRC_VEC128
3443 };
3444
3445 /* The length of one element of base type B. */
3446
3447 static unsigned
3448 arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
3449 {
3450 switch (b)
3451 {
3452 case VFP_CPRC_SINGLE:
3453 return 4;
3454 case VFP_CPRC_DOUBLE:
3455 return 8;
3456 case VFP_CPRC_VEC64:
3457 return 8;
3458 case VFP_CPRC_VEC128:
3459 return 16;
3460 default:
3461 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3462 (int) b);
3463 }
3464 }
3465
3466 /* The character ('s', 'd' or 'q') for the type of VFP register used
3467 for passing base type B. */
3468
3469 static int
3470 arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
3471 {
3472 switch (b)
3473 {
3474 case VFP_CPRC_SINGLE:
3475 return 's';
3476 case VFP_CPRC_DOUBLE:
3477 return 'd';
3478 case VFP_CPRC_VEC64:
3479 return 'd';
3480 case VFP_CPRC_VEC128:
3481 return 'q';
3482 default:
3483 internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
3484 (int) b);
3485 }
3486 }
3487
3488 /* Determine whether T may be part of a candidate for passing and
3489 returning in VFP registers, ignoring the limit on the total number
3490 of components. If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
3491 classification of the first valid component found; if it is not
3492 VFP_CPRC_UNKNOWN, all components must have the same classification
3493 as *BASE_TYPE. If it is found that T contains a type not permitted
3494 for passing and returning in VFP registers, a type differently
3495 classified from *BASE_TYPE, or two types differently classified
3496 from each other, return -1, otherwise return the total number of
3497 base-type elements found (possibly 0 in an empty structure or
3498 array). Vectors and complex types are not currently supported,
3499 matching the generic AAPCS support. */
3500
3501 static int
3502 arm_vfp_cprc_sub_candidate (struct type *t,
3503 enum arm_vfp_cprc_base_type *base_type)
3504 {
3505 t = check_typedef (t);
3506 switch (TYPE_CODE (t))
3507 {
3508 case TYPE_CODE_FLT:
3509 switch (TYPE_LENGTH (t))
3510 {
3511 case 4:
3512 if (*base_type == VFP_CPRC_UNKNOWN)
3513 *base_type = VFP_CPRC_SINGLE;
3514 else if (*base_type != VFP_CPRC_SINGLE)
3515 return -1;
3516 return 1;
3517
3518 case 8:
3519 if (*base_type == VFP_CPRC_UNKNOWN)
3520 *base_type = VFP_CPRC_DOUBLE;
3521 else if (*base_type != VFP_CPRC_DOUBLE)
3522 return -1;
3523 return 1;
3524
3525 default:
3526 return -1;
3527 }
3528 break;
3529
3530 case TYPE_CODE_ARRAY:
3531 {
3532 int count;
3533 unsigned unitlen;
3534 count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
3535 if (count == -1)
3536 return -1;
3537 if (TYPE_LENGTH (t) == 0)
3538 {
3539 gdb_assert (count == 0);
3540 return 0;
3541 }
3542 else if (count == 0)
3543 return -1;
3544 unitlen = arm_vfp_cprc_unit_length (*base_type);
3545 gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
3546 return TYPE_LENGTH (t) / unitlen;
3547 }
3548 break;
3549
3550 case TYPE_CODE_STRUCT:
3551 {
3552 int count = 0;
3553 unsigned unitlen;
3554 int i;
3555 for (i = 0; i < TYPE_NFIELDS (t); i++)
3556 {
3557 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3558 base_type);
3559 if (sub_count == -1)
3560 return -1;
3561 count += sub_count;
3562 }
3563 if (TYPE_LENGTH (t) == 0)
3564 {
3565 gdb_assert (count == 0);
3566 return 0;
3567 }
3568 else if (count == 0)
3569 return -1;
3570 unitlen = arm_vfp_cprc_unit_length (*base_type);
3571 if (TYPE_LENGTH (t) != unitlen * count)
3572 return -1;
3573 return count;
3574 }
3575
3576 case TYPE_CODE_UNION:
3577 {
3578 int count = 0;
3579 unsigned unitlen;
3580 int i;
3581 for (i = 0; i < TYPE_NFIELDS (t); i++)
3582 {
3583 int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
3584 base_type);
3585 if (sub_count == -1)
3586 return -1;
3587 count = (count > sub_count ? count : sub_count);
3588 }
3589 if (TYPE_LENGTH (t) == 0)
3590 {
3591 gdb_assert (count == 0);
3592 return 0;
3593 }
3594 else if (count == 0)
3595 return -1;
3596 unitlen = arm_vfp_cprc_unit_length (*base_type);
3597 if (TYPE_LENGTH (t) != unitlen * count)
3598 return -1;
3599 return count;
3600 }
3601
3602 default:
3603 break;
3604 }
3605
3606 return -1;
3607 }
3608
3609 /* Determine whether T is a VFP co-processor register candidate (CPRC)
3610 if passed to or returned from a non-variadic function with the VFP
3611 ABI in effect. Return 1 if it is, 0 otherwise. If it is, set
3612 *BASE_TYPE to the base type for T and *COUNT to the number of
3613 elements of that base type before returning. */
3614
3615 static int
3616 arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
3617 int *count)
3618 {
3619 enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
3620 int c = arm_vfp_cprc_sub_candidate (t, &b);
3621 if (c <= 0 || c > 4)
3622 return 0;
3623 *base_type = b;
3624 *count = c;
3625 return 1;
3626 }
3627
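/* For illustration: under the VFP variant of the AAPCS, a homogeneous
   aggregate of up to four floats or doubles is a CPRC and travels in
   consecutive VFP registers.  A sketch (not GDB code):  */
#if 0
/* arm_vfp_call_candidate reports base type VFP_CPRC_SINGLE and a count
   of 3 for this type, so a value of this type is passed (and returned)
   in s0-s2 when the VFP ABI is in use.  */
struct example_vec3
{
  float x, y, z;
};

/* By contrast, five floats would exceed the four-element limit, and a
   struct mixing float and double members would mix base types; neither
   is a CPRC, so both fall back to the base (integer/stack) rules.  */
#endif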
3628 /* Return 1 if the VFP ABI should be used for passing arguments to and
3629 returning values from a function of type FUNC_TYPE, 0
3630 otherwise. */
3631
3632 static int
3633 arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
3634 {
3635 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3636 /* Variadic functions always use the base ABI. Assume that functions
3637 without debug info are not variadic. */
3638 if (func_type && TYPE_VARARGS (check_typedef (func_type)))
3639 return 0;
3640 /* The VFP ABI is only supported as a variant of AAPCS. */
3641 if (tdep->arm_abi != ARM_ABI_AAPCS)
3642 return 0;
3643 return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
3644 }
3645
3646 /* We currently only support passing parameters in integer registers, which
3647 conforms with GCC's default model, and VFP argument passing following
3648 the VFP variant of AAPCS. Several other variants exist and
3649 we should probably support some of them based on the selected ABI. */
3650
3651 static CORE_ADDR
3652 arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3653 struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
3654 struct value **args, CORE_ADDR sp, int struct_return,
3655 CORE_ADDR struct_addr)
3656 {
3657 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3658 int argnum;
3659 int argreg;
3660 int nstack;
3661 struct stack_item *si = NULL;
3662 int use_vfp_abi;
3663 struct type *ftype;
3664 unsigned vfp_regs_free = (1 << 16) - 1;
3665
3666 /* Determine the type of this function and whether the VFP ABI
3667 applies. */
3668 ftype = check_typedef (value_type (function));
3669 if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
3670 ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
3671 use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);
3672
3673 /* Set the return address. For the ARM, the return breakpoint is
3674 always at BP_ADDR. */
3675 if (arm_pc_is_thumb (gdbarch, bp_addr))
3676 bp_addr |= 1;
3677 regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);
3678
3679 /* Walk through the list of args and determine how large a temporary
3680 stack is required. Need to take care here as structs may be
3681 passed on the stack, and we have to push them. */
3682 nstack = 0;
3683
3684 argreg = ARM_A1_REGNUM;
3685 nstack = 0;
3686
3687 /* The struct_return pointer occupies the first parameter
3688 passing register. */
3689 if (struct_return)
3690 {
3691 if (arm_debug)
3692 fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
3693 gdbarch_register_name (gdbarch, argreg),
3694 paddress (gdbarch, struct_addr));
3695 regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
3696 argreg++;
3697 }
3698
3699 for (argnum = 0; argnum < nargs; argnum++)
3700 {
3701 int len;
3702 struct type *arg_type;
3703 struct type *target_type;
3704 enum type_code typecode;
3705 const bfd_byte *val;
3706 int align;
3707 enum arm_vfp_cprc_base_type vfp_base_type;
3708 int vfp_base_count;
3709 int may_use_core_reg = 1;
3710
3711 arg_type = check_typedef (value_type (args[argnum]));
3712 len = TYPE_LENGTH (arg_type);
3713 target_type = TYPE_TARGET_TYPE (arg_type);
3714 typecode = TYPE_CODE (arg_type);
3715 val = value_contents (args[argnum]);
3716
3717 align = arm_type_align (arg_type);
3718 /* Round alignment up to a whole number of words. */
3719 align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
3720 /* Different ABIs have different maximum alignments. */
3721 if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
3722 {
3723 /* The APCS ABI only requires word alignment. */
3724 align = INT_REGISTER_SIZE;
3725 }
3726 else
3727 {
3728 /* The AAPCS requires at most doubleword alignment. */
3729 if (align > INT_REGISTER_SIZE * 2)
3730 align = INT_REGISTER_SIZE * 2;
3731 }
3732
3733 if (use_vfp_abi
3734 && arm_vfp_call_candidate (arg_type, &vfp_base_type,
3735 &vfp_base_count))
3736 {
3737 int regno;
3738 int unit_length;
3739 int shift;
3740 unsigned mask;
3741
3742 /* Because this is a CPRC it cannot go in a core register or
3743 cause a core register to be skipped for alignment.
3744 Either it goes in VFP registers and the rest of this loop
3745 iteration is skipped for this argument, or it goes on the
3746 stack (and the stack alignment code is correct for this
3747 case). */
3748 may_use_core_reg = 0;
3749
3750 unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
3751 shift = unit_length / 4;
3752 mask = (1 << (shift * vfp_base_count)) - 1;
3753 for (regno = 0; regno < 16; regno += shift)
3754 if (((vfp_regs_free >> regno) & mask) == mask)
3755 break;
3756
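/* For example, for an argument made up of two doubles, UNIT_LENGTH is 8,
   so SHIFT is 2 and MASK is 0xf: the loop above searches VFP_REGS_FREE
   (which tracks the sixteen single-precision argument slots s0-s15) for
   four consecutive free slots, i.e. two adjacent free d registers,
   starting at an even s-register number.  */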
3757 if (regno < 16)
3758 {
3759 int reg_char;
3760 int reg_scaled;
3761 int i;
3762
3763 vfp_regs_free &= ~(mask << regno);
3764 reg_scaled = regno / shift;
3765 reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
3766 for (i = 0; i < vfp_base_count; i++)
3767 {
3768 char name_buf[4];
3769 int regnum;
3770 if (reg_char == 'q')
3771 arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
3772 val + i * unit_length);
3773 else
3774 {
3775 xsnprintf (name_buf, sizeof (name_buf), "%c%d",
3776 reg_char, reg_scaled + i);
3777 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
3778 strlen (name_buf));
3779 regcache_cooked_write (regcache, regnum,
3780 val + i * unit_length);
3781 }
3782 }
3783 continue;
3784 }
3785 else
3786 {
3787 /* This CPRC could not go in VFP registers, so all VFP
3788 registers are now marked as used. */
3789 vfp_regs_free = 0;
3790 }
3791 }
3792
3793 /* Push stack padding for doubleword alignment. */
3794 if (nstack & (align - 1))
3795 {
3796 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3797 nstack += INT_REGISTER_SIZE;
3798 }
3799
3800 /* Doubleword aligned quantities must go in even register pairs. */
3801 if (may_use_core_reg
3802 && argreg <= ARM_LAST_ARG_REGNUM
3803 && align > INT_REGISTER_SIZE
3804 && argreg & 1)
3805 argreg++;
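/* For example, an argument with 8-byte alignment (such as a 64-bit
   integer passed in core registers under AAPCS) that would otherwise
   start in r1 is placed in r2/r3 instead, leaving r1 unused.  */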
3806
3807 /* If the argument is a pointer to a function, and it is a
3808 Thumb function, create a LOCAL copy of the value and set
3809 the THUMB bit in it. */
3810 if (TYPE_CODE_PTR == typecode
3811 && target_type != NULL
3812 && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
3813 {
3814 CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
3815 if (arm_pc_is_thumb (gdbarch, regval))
3816 {
3817 bfd_byte *copy = alloca (len);
3818 store_unsigned_integer (copy, len, byte_order,
3819 MAKE_THUMB_ADDR (regval));
3820 val = copy;
3821 }
3822 }
3823
3824 /* Copy the argument to general registers or the stack in
3825 register-sized pieces. Large arguments are split between
3826 registers and stack. */
3827 while (len > 0)
3828 {
3829 int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;
3830
3831 if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
3832 {
3833 /* The argument is being passed in a general purpose
3834 register. */
3835 CORE_ADDR regval
3836 = extract_unsigned_integer (val, partial_len, byte_order);
3837 if (byte_order == BFD_ENDIAN_BIG)
3838 regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
3839 if (arm_debug)
3840 fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
3841 argnum,
3842 gdbarch_register_name
3843 (gdbarch, argreg),
3844 phex (regval, INT_REGISTER_SIZE));
3845 regcache_cooked_write_unsigned (regcache, argreg, regval);
3846 argreg++;
3847 }
3848 else
3849 {
3850 /* Push the arguments onto the stack. */
3851 if (arm_debug)
3852 fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
3853 argnum, nstack);
3854 si = push_stack_item (si, val, INT_REGISTER_SIZE);
3855 nstack += INT_REGISTER_SIZE;
3856 }
3857
3858 len -= partial_len;
3859 val += partial_len;
3860 }
3861 }
3862 /* If we have an odd number of words to push, decrement the stack by one
3863 word now, so that the first stack argument will be doubleword aligned. */
3864 if (nstack & 4)
3865 sp -= 4;
3866
3867 while (si)
3868 {
3869 sp -= si->len;
3870 write_memory (sp, si->data, si->len);
3871 si = pop_stack_item (si);
3872 }
3873
3874 /* Finally, update the SP register. */
3875 regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);
3876
3877 return sp;
3878 }
3879
3880
3881 /* Always align the frame to an 8-byte boundary. This is required on
3882 some platforms and harmless on the rest. */
3883
3884 static CORE_ADDR
3885 arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3886 {
3887 /* Align the stack to eight bytes. */
3888 return sp & ~ (CORE_ADDR) 7;
3889 }
3890
3891 static void
3892 print_fpu_flags (int flags)
3893 {
3894 if (flags & (1 << 0))
3895 fputs ("IVO ", stdout);
3896 if (flags & (1 << 1))
3897 fputs ("DVZ ", stdout);
3898 if (flags & (1 << 2))
3899 fputs ("OFL ", stdout);
3900 if (flags & (1 << 3))
3901 fputs ("UFL ", stdout);
3902 if (flags & (1 << 4))
3903 fputs ("INX ", stdout);
3904 putchar ('\n');
3905 }
3906
3907 /* Print interesting information about the floating point processor
3908 (if present) or emulator. */
3909 static void
3910 arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
3911 struct frame_info *frame, const char *args)
3912 {
3913 unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
3914 int type;
3915
3916 type = (status >> 24) & 127;
3917 if (status & (1 << 31))
3918 printf (_("Hardware FPU type %d\n"), type);
3919 else
3920 printf (_("Software FPU type %d\n"), type);
3921 /* i18n: [floating point unit] mask */
3922 fputs (_("mask: "), stdout);
3923 print_fpu_flags (status >> 16);
3924 /* i18n: [floating point unit] flags */
3925 fputs (_("flags: "), stdout);
3926 print_fpu_flags (status);
3927 }
3928
3929 /* Construct the ARM extended floating point type. */
3930 static struct type *
3931 arm_ext_type (struct gdbarch *gdbarch)
3932 {
3933 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3934
3935 if (!tdep->arm_ext_type)
3936 tdep->arm_ext_type
3937 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext",
3938 floatformats_arm_ext);
3939
3940 return tdep->arm_ext_type;
3941 }
3942
3943 static struct type *
3944 arm_neon_double_type (struct gdbarch *gdbarch)
3945 {
3946 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3947
3948 if (tdep->neon_double_type == NULL)
3949 {
3950 struct type *t, *elem;
3951
3952 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d",
3953 TYPE_CODE_UNION);
3954 elem = builtin_type (gdbarch)->builtin_uint8;
3955 append_composite_type_field (t, "u8", init_vector_type (elem, 8));
3956 elem = builtin_type (gdbarch)->builtin_uint16;
3957 append_composite_type_field (t, "u16", init_vector_type (elem, 4));
3958 elem = builtin_type (gdbarch)->builtin_uint32;
3959 append_composite_type_field (t, "u32", init_vector_type (elem, 2));
3960 elem = builtin_type (gdbarch)->builtin_uint64;
3961 append_composite_type_field (t, "u64", elem);
3962 elem = builtin_type (gdbarch)->builtin_float;
3963 append_composite_type_field (t, "f32", init_vector_type (elem, 2));
3964 elem = builtin_type (gdbarch)->builtin_double;
3965 append_composite_type_field (t, "f64", elem);
3966
3967 TYPE_VECTOR (t) = 1;
3968 TYPE_NAME (t) = "neon_d";
3969 tdep->neon_double_type = t;
3970 }
3971
3972 return tdep->neon_double_type;
3973 }
3974
3975 /* FIXME: The vector types are not correctly ordered on big-endian
3976 targets. Just as s0 is the low bits of d0, d0[0] is also the low
3977 bits of d0 - regardless of what unit size is being held in d0. So
3978 the offset of the first uint8 in d0 is 7, but the offset of the
3979 first float is 4. This code works as-is for little-endian
3980 targets. */
3981
3982 static struct type *
3983 arm_neon_quad_type (struct gdbarch *gdbarch)
3984 {
3985 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3986
3987 if (tdep->neon_quad_type == NULL)
3988 {
3989 struct type *t, *elem;
3990
3991 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
3992 TYPE_CODE_UNION);
3993 elem = builtin_type (gdbarch)->builtin_uint8;
3994 append_composite_type_field (t, "u8", init_vector_type (elem, 16));
3995 elem = builtin_type (gdbarch)->builtin_uint16;
3996 append_composite_type_field (t, "u16", init_vector_type (elem, 8));
3997 elem = builtin_type (gdbarch)->builtin_uint32;
3998 append_composite_type_field (t, "u32", init_vector_type (elem, 4));
3999 elem = builtin_type (gdbarch)->builtin_uint64;
4000 append_composite_type_field (t, "u64", init_vector_type (elem, 2));
4001 elem = builtin_type (gdbarch)->builtin_float;
4002 append_composite_type_field (t, "f32", init_vector_type (elem, 4));
4003 elem = builtin_type (gdbarch)->builtin_double;
4004 append_composite_type_field (t, "f64", init_vector_type (elem, 2));
4005
4006 TYPE_VECTOR (t) = 1;
4007 TYPE_NAME (t) = "neon_q";
4008 tdep->neon_quad_type = t;
4009 }
4010
4011 return tdep->neon_quad_type;
4012 }
4013
4014 /* Return the GDB type object for the "standard" data type of data in
4015 register N. */
4016
4017 static struct type *
4018 arm_register_type (struct gdbarch *gdbarch, int regnum)
4019 {
4020 int num_regs = gdbarch_num_regs (gdbarch);
4021
4022 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
4023 && regnum >= num_regs && regnum < num_regs + 32)
4024 return builtin_type (gdbarch)->builtin_float;
4025
4026 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
4027 && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
4028 return arm_neon_quad_type (gdbarch);
4029
4030 /* If the target description has register information, the only reason
4031 we are in this function is to override the types of double-precision
4032 registers for NEON. */
4033 if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
4034 {
4035 struct type *t = tdesc_register_type (gdbarch, regnum);
4036
4037 if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
4038 && TYPE_CODE (t) == TYPE_CODE_FLT
4039 && gdbarch_tdep (gdbarch)->have_neon)
4040 return arm_neon_double_type (gdbarch);
4041 else
4042 return t;
4043 }
4044
4045 if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
4046 {
4047 if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
4048 return builtin_type (gdbarch)->builtin_void;
4049
4050 return arm_ext_type (gdbarch);
4051 }
4052 else if (regnum == ARM_SP_REGNUM)
4053 return builtin_type (gdbarch)->builtin_data_ptr;
4054 else if (regnum == ARM_PC_REGNUM)
4055 return builtin_type (gdbarch)->builtin_func_ptr;
4056 else if (regnum >= ARRAY_SIZE (arm_register_names))
4057 /* These registers are only supported on targets which supply
4058 an XML description. */
4059 return builtin_type (gdbarch)->builtin_int0;
4060 else
4061 return builtin_type (gdbarch)->builtin_uint32;
4062 }
4063
4064 /* Map a DWARF register REGNUM onto the appropriate GDB register
4065 number. */
4066
4067 static int
4068 arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
4069 {
4070 /* Core integer regs. */
4071 if (reg >= 0 && reg <= 15)
4072 return reg;
4073
4074 /* Legacy FPA encoding. These were once used in a way which
4075 overlapped with VFP register numbering, so their use is
4076 discouraged, but GDB doesn't support the ARM toolchain
4077 which used them for VFP. */
4078 if (reg >= 16 && reg <= 23)
4079 return ARM_F0_REGNUM + reg - 16;
4080
4081 /* New assignments for the FPA registers. */
4082 if (reg >= 96 && reg <= 103)
4083 return ARM_F0_REGNUM + reg - 96;
4084
4085 /* WMMX register assignments. */
4086 if (reg >= 104 && reg <= 111)
4087 return ARM_WCGR0_REGNUM + reg - 104;
4088
4089 if (reg >= 112 && reg <= 127)
4090 return ARM_WR0_REGNUM + reg - 112;
4091
4092 if (reg >= 192 && reg <= 199)
4093 return ARM_WC0_REGNUM + reg - 192;
4094
4095 /* VFP v2 registers. A double precision value is actually
4096 in d1 rather than s2, but the ABI only defines numbering
4097 for the single precision registers. This will "just work"
4098 in GDB for little endian targets (we'll read eight bytes,
4099 starting in s0 and then progressing to s1), but will be
4100 reversed on big endian targets with VFP. This won't
4101 be a problem for the new Neon quad registers; you're supposed
4102 to use DW_OP_piece for those. */
4103 if (reg >= 64 && reg <= 95)
4104 {
4105 char name_buf[4];
4106
4107 xsnprintf (name_buf, sizeof (name_buf), "s%d", reg - 64);
4108 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4109 strlen (name_buf));
4110 }
4111
4112 /* VFP v3 / Neon registers. This range is also used for VFP v2
4113 registers, except that it now describes d0 instead of s0. */
4114 if (reg >= 256 && reg <= 287)
4115 {
4116 char name_buf[4];
4117
4118 xsnprintf (name_buf, sizeof (name_buf), "d%d", reg - 256);
4119 return user_reg_map_name_to_regnum (gdbarch, name_buf,
4120 strlen (name_buf));
4121 }
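/* For example, DWARF register 64 maps to s0 and 95 to s31, while 256
   maps to d0 and 287 to d31, all resolved by name through
   user_reg_map_name_to_regnum.  */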
4122
4123 return -1;
4124 }
4125
4126 /* Map GDB internal REGNUM onto the Arm simulator register numbers. */
4127 static int
4128 arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
4129 {
4130 int reg = regnum;
4131 gdb_assert (reg >= 0 && reg < gdbarch_num_regs (gdbarch));
4132
4133 if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
4134 return regnum - ARM_WR0_REGNUM + SIM_ARM_IWMMXT_COP0R0_REGNUM;
4135
4136 if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
4137 return regnum - ARM_WC0_REGNUM + SIM_ARM_IWMMXT_COP1R0_REGNUM;
4138
4139 if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
4140 return regnum - ARM_WCGR0_REGNUM + SIM_ARM_IWMMXT_COP1R8_REGNUM;
4141
4142 if (reg < NUM_GREGS)
4143 return SIM_ARM_R0_REGNUM + reg;
4144 reg -= NUM_GREGS;
4145
4146 if (reg < NUM_FREGS)
4147 return SIM_ARM_FP0_REGNUM + reg;
4148 reg -= NUM_FREGS;
4149
4150 if (reg < NUM_SREGS)
4151 return SIM_ARM_FPS_REGNUM + reg;
4152 reg -= NUM_SREGS;
4153
4154 internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
4155 }
4156
4157 /* NOTE: cagney/2001-08-20: Both convert_from_extended() and
4158 convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
4159 It is thought that this is the floating-point register format on
4160 little-endian systems. */
4161
4162 static void
4163 convert_from_extended (const struct floatformat *fmt, const void *ptr,
4164 void *dbl, int endianess)
4165 {
4166 DOUBLEST d;
4167
4168 if (endianess == BFD_ENDIAN_BIG)
4169 floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
4170 else
4171 floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
4172 ptr, &d);
4173 floatformat_from_doublest (fmt, &d, dbl);
4174 }
4175
4176 static void
4177 convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
4178 int endianess)
4179 {
4180 DOUBLEST d;
4181
4182 floatformat_to_doublest (fmt, ptr, &d);
4183 if (endianess == BFD_ENDIAN_BIG)
4184 floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
4185 else
4186 floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
4187 &d, dbl);
4188 }
4189
4190 static int
4191 condition_true (unsigned long cond, unsigned long status_reg)
4192 {
4193 if (cond == INST_AL || cond == INST_NV)
4194 return 1;
4195
4196 switch (cond)
4197 {
4198 case INST_EQ:
4199 return ((status_reg & FLAG_Z) != 0);
4200 case INST_NE:
4201 return ((status_reg & FLAG_Z) == 0);
4202 case INST_CS:
4203 return ((status_reg & FLAG_C) != 0);
4204 case INST_CC:
4205 return ((status_reg & FLAG_C) == 0);
4206 case INST_MI:
4207 return ((status_reg & FLAG_N) != 0);
4208 case INST_PL:
4209 return ((status_reg & FLAG_N) == 0);
4210 case INST_VS:
4211 return ((status_reg & FLAG_V) != 0);
4212 case INST_VC:
4213 return ((status_reg & FLAG_V) == 0);
4214 case INST_HI:
4215 return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
4216 case INST_LS:
4217 return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
4218 case INST_GE:
4219 return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
4220 case INST_LT:
4221 return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
4222 case INST_GT:
4223 return (((status_reg & FLAG_Z) == 0)
4224 && (((status_reg & FLAG_N) == 0)
4225 == ((status_reg & FLAG_V) == 0)));
4226 case INST_LE:
4227 return (((status_reg & FLAG_Z) != 0)
4228 || (((status_reg & FLAG_N) == 0)
4229 != ((status_reg & FLAG_V) == 0)));
4230 }
4231 return 1;
4232 }
4233
4234 static unsigned long
4235 shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
4236 unsigned long pc_val, unsigned long status_reg)
4237 {
4238 unsigned long res, shift;
4239 int rm = bits (inst, 0, 3);
4240 unsigned long shifttype = bits (inst, 5, 6);
4241
4242 if (bit (inst, 4))
4243 {
4244 int rs = bits (inst, 8, 11);
4245 shift = (rs == 15 ? pc_val + 8
4246 : get_frame_register_unsigned (frame, rs)) & 0xFF;
4247 }
4248 else
4249 shift = bits (inst, 7, 11);
4250
4251 res = (rm == ARM_PC_REGNUM
4252 ? (pc_val + (bit (inst, 4) ? 12 : 8))
4253 : get_frame_register_unsigned (frame, rm));
4254
4255 switch (shifttype)
4256 {
4257 case 0: /* LSL */
4258 res = shift >= 32 ? 0 : res << shift;
4259 break;
4260
4261 case 1: /* LSR */
4262 res = shift >= 32 ? 0 : res >> shift;
4263 break;
4264
4265 case 2: /* ASR */
4266 if (shift >= 32)
4267 shift = 31;
4268 res = ((res & 0x80000000L)
4269 ? ~((~res) >> shift) : res >> shift);
4270 break;
4271
4272 case 3: /* ROR/RRX */
4273 shift &= 31;
4274 if (shift == 0)
4275 res = (res >> 1) | (carry ? 0x80000000L : 0);
4276 else
4277 res = (res >> shift) | (res << (32 - shift));
4278 break;
4279 }
4280
4281 return res & 0xffffffff;
4282 }
4283
4284 /* Return number of 1-bits in VAL. */
4285
4286 static int
4287 bitcount (unsigned long val)
4288 {
4289 int nbits;
4290 for (nbits = 0; val != 0; nbits++)
4291 val &= val - 1; /* Delete rightmost 1-bit in val. */
4292 return nbits;
4293 }
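/* For example, bitcount (0x00ff) is 8 and bitcount (0x8001) is 2; the
   loop executes once per set bit rather than once per bit position.  */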
4294
4295 /* Return the size in bytes of the complete Thumb instruction whose
4296 first halfword is INST1. */
4297
4298 static int
4299 thumb_insn_size (unsigned short inst1)
4300 {
4301 if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
4302 return 4;
4303 else
4304 return 2;
4305 }
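/* Only halfwords whose top three bits are all ones and whose bits 12:11
   are non-zero start a 32-bit encoding.  For example, 0xe92d (the first
   halfword of a 32-bit PUSH.W) yields 4, while 0xb580 (a 16-bit PUSH)
   and 0xe7fe (a 16-bit unconditional branch) yield 2.  */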
4306
4307 static int
4308 thumb_advance_itstate (unsigned int itstate)
4309 {
4310 /* Preserve IT[7:5], the first three bits of the condition. Shift
4311 the upcoming condition flags left by one bit. */
4312 itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);
4313
4314 /* If we have finished the IT block, clear the state. */
4315 if ((itstate & 0x0f) == 0)
4316 itstate = 0;
4317
4318 return itstate;
4319 }
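/* For example, after an "ITTE EQ" instruction the IT state is 0x06: the
   base condition (EQ) lives in bits 7:5, and the per-instruction
   condition bits plus block length live in bits 4:0.  Successive calls
   to this function yield 0x0c (second instruction, still EQ), then 0x18
   (third instruction, now NE), and finally 0 once the block ends.  */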
4320
4321 /* Find the next PC after the current instruction executes. In some
4322 cases we cannot statically determine the answer (see the IT state
4323 handling in this function); in that case, this function may insert a
4324 breakpoint of its own, in addition to returning the PC that our
4325 caller will use to set another breakpoint. */
4326
4327 static CORE_ADDR
4328 thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4329 {
4330 struct gdbarch *gdbarch = get_frame_arch (frame);
4331 struct address_space *aspace = get_frame_address_space (frame);
4332 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4333 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4334 unsigned long pc_val = ((unsigned long) pc) + 4; /* PC after prefetch */
4335 unsigned short inst1;
4336 CORE_ADDR nextpc = pc + 2; /* Default is next instruction. */
4337 unsigned long offset;
4338 ULONGEST status, itstate;
4339
4340 nextpc = MAKE_THUMB_ADDR (nextpc);
4341 pc_val = MAKE_THUMB_ADDR (pc_val);
4342
4343 inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);
4344
4345 /* Thumb-2 conditional execution support. There are eight bits in
4346 the CPSR which describe conditional execution state. Once
4347 reconstructed (they're in a funny order), the low five bits
4348 describe the low bit of the condition for each instruction and
4349 how many instructions remain. The high three bits describe the
4350 base condition. One of the low four bits will be set if an IT
4351 block is active. These bits read as zero on earlier
4352 processors. */
4353 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4354 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
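/* The CPSR keeps IT[7:2] in bits 15:10 and IT[1:0] in bits 26:25; the
   expression above reassembles them into a single eight-bit ITSTATE
   value with the base condition in the top bits.  */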
4355
4356 /* If-Then handling. On GNU/Linux, where this routine is used, we
4357 use an undefined instruction as a breakpoint. Unlike BKPT, IT
4358 can disable execution of the undefined instruction. So we might
4359 miss the breakpoint if we set it on a skipped conditional
4360 instruction. Because conditional instructions can change the
4361 flags, affecting the execution of further instructions, we may
4362 need to set two breakpoints. */
4363
4364 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
4365 {
4366 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
4367 {
4368 /* An IT instruction. Because this instruction does not
4369 modify the flags, we can accurately predict the next
4370 executed instruction. */
4371 itstate = inst1 & 0x00ff;
4372 pc += thumb_insn_size (inst1);
4373
4374 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4375 {
4376 inst1 = read_memory_unsigned_integer (pc, 2,
4377 byte_order_for_code);
4378 pc += thumb_insn_size (inst1);
4379 itstate = thumb_advance_itstate (itstate);
4380 }
4381
4382 return MAKE_THUMB_ADDR (pc);
4383 }
4384 else if (itstate != 0)
4385 {
4386 /* We are in a conditional block. Check the condition. */
4387 if (! condition_true (itstate >> 4, status))
4388 {
4389 /* Advance to the next executed instruction. */
4390 pc += thumb_insn_size (inst1);
4391 itstate = thumb_advance_itstate (itstate);
4392
4393 while (itstate != 0 && ! condition_true (itstate >> 4, status))
4394 {
4395 inst1 = read_memory_unsigned_integer (pc, 2,
4396 byte_order_for_code);
4397 pc += thumb_insn_size (inst1);
4398 itstate = thumb_advance_itstate (itstate);
4399 }
4400
4401 return MAKE_THUMB_ADDR (pc);
4402 }
4403 else if ((itstate & 0x0f) == 0x08)
4404 {
4405 /* This is the last instruction of the conditional
4406 block, and it is executed. We can handle it normally
4407 because the following instruction is not conditional,
4408 and we must handle it normally because it is
4409 permitted to branch. Fall through. */
4410 }
4411 else
4412 {
4413 int cond_negated;
4414
4415 /* There are conditional instructions after this one.
4416 If this instruction modifies the flags, then we can
4417 not predict what the next executed instruction will
4418 be. Fortunately, this instruction is architecturally
4419 forbidden to branch; we know it will fall through.
4420 Start by skipping past it. */
4421 pc += thumb_insn_size (inst1);
4422 itstate = thumb_advance_itstate (itstate);
4423
4424 /* Set a breakpoint on the following instruction. */
4425 gdb_assert ((itstate & 0x0f) != 0);
4426 arm_insert_single_step_breakpoint (gdbarch, aspace,
4427 MAKE_THUMB_ADDR (pc));
4428 cond_negated = (itstate >> 4) & 1;
4429
4430 /* Skip all following instructions with the same
4431 condition. If there is a later instruction in the IT
4432 block with the opposite condition, set the other
4433 breakpoint there. If not, then set a breakpoint on
4434 the instruction after the IT block. */
4435 do
4436 {
4437 inst1 = read_memory_unsigned_integer (pc, 2,
4438 byte_order_for_code);
4439 pc += thumb_insn_size (inst1);
4440 itstate = thumb_advance_itstate (itstate);
4441 }
4442 while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);
4443
4444 return MAKE_THUMB_ADDR (pc);
4445 }
4446 }
4447 }
4448 else if (itstate & 0x0f)
4449 {
4450 /* We are in a conditional block. Check the condition. */
4451 int cond = itstate >> 4;
4452
4453 if (! condition_true (cond, status))
4454 /* Advance to the next instruction. All the 32-bit
4455 instructions share a common prefix. */
4456 return MAKE_THUMB_ADDR (pc + thumb_insn_size (inst1));
4457
4458 /* Otherwise, handle the instruction normally. */
4459 }
4460
4461 if ((inst1 & 0xff00) == 0xbd00) /* pop {rlist, pc} */
4462 {
4463 CORE_ADDR sp;
4464
4465 /* Fetch the saved PC from the stack. It's stored above
4466 all of the other registers. */
4467 offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
4468 sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
4469 nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
4470 }
4471 else if ((inst1 & 0xf000) == 0xd000) /* conditional branch */
4472 {
4473 unsigned long cond = bits (inst1, 8, 11);
4474 if (cond == 0x0f) /* 0x0f = SWI */
4475 {
4476 struct gdbarch_tdep *tdep;
4477 tdep = gdbarch_tdep (gdbarch);
4478
4479 if (tdep->syscall_next_pc != NULL)
4480 nextpc = tdep->syscall_next_pc (frame);
4481
4482 }
4483 else if (cond != 0x0f && condition_true (cond, status))
4484 nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
4485 }
4486 else if ((inst1 & 0xf800) == 0xe000) /* unconditional branch */
4487 {
4488 nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
4489 }
4490 else if (thumb_insn_size (inst1) == 4) /* 32-bit instruction */
4491 {
4492 unsigned short inst2;
4493 inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);
4494
4495 /* Default to the next instruction. */
4496 nextpc = pc + 4;
4497 nextpc = MAKE_THUMB_ADDR (nextpc);
4498
4499 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
4500 {
4501 /* Branches and miscellaneous control instructions. */
4502
4503 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
4504 {
4505 /* B, BL, BLX. */
4506 int j1, j2, imm1, imm2;
4507
4508 imm1 = sbits (inst1, 0, 10);
4509 imm2 = bits (inst2, 0, 10);
4510 j1 = bit (inst2, 13);
4511 j2 = bit (inst2, 11);
4512
4513 offset = ((imm1 << 12) + (imm2 << 1));
4514 offset ^= ((!j2) << 22) | ((!j1) << 23);
4515
4516 nextpc = pc_val + offset;
4517 /* For BLX make sure to clear the low bits. */
4518 if (bit (inst2, 12) == 0)
4519 nextpc = nextpc & 0xfffffffc;
4520 }
4521 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
4522 {
4523 /* SUBS PC, LR, #imm8. */
4524 nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
4525 nextpc -= inst2 & 0x00ff;
4526 }
4527 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
4528 {
4529 /* Conditional branch. */
4530 if (condition_true (bits (inst1, 6, 9), status))
4531 {
4532 int sign, j1, j2, imm1, imm2;
4533
4534 sign = sbits (inst1, 10, 10);
4535 imm1 = bits (inst1, 0, 5);
4536 imm2 = bits (inst2, 0, 10);
4537 j1 = bit (inst2, 13);
4538 j2 = bit (inst2, 11);
4539
4540 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
4541 offset += (imm1 << 12) + (imm2 << 1);
4542
4543 nextpc = pc_val + offset;
4544 }
4545 }
4546 }
4547 else if ((inst1 & 0xfe50) == 0xe810)
4548 {
4549 /* Load multiple or RFE. */
4550 int rn, offset, load_pc = 1;
4551
4552 rn = bits (inst1, 0, 3);
4553 if (bit (inst1, 7) && !bit (inst1, 8))
4554 {
4555 /* LDMIA or POP */
4556 if (!bit (inst2, 15))
4557 load_pc = 0;
4558 offset = bitcount (inst2) * 4 - 4;
4559 }
4560 else if (!bit (inst1, 7) && bit (inst1, 8))
4561 {
4562 /* LDMDB */
4563 if (!bit (inst2, 15))
4564 load_pc = 0;
4565 offset = -4;
4566 }
4567 else if (bit (inst1, 7) && bit (inst1, 8))
4568 {
4569 /* RFEIA */
4570 offset = 0;
4571 }
4572 else if (!bit (inst1, 7) && !bit (inst1, 8))
4573 {
4574 /* RFEDB */
4575 offset = -8;
4576 }
4577 else
4578 load_pc = 0;
4579
4580 if (load_pc)
4581 {
4582 CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
4583 nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
4584 }
4585 }
4586 else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
4587 {
4588 /* MOV PC or MOVS PC. */
4589 nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4590 nextpc = MAKE_THUMB_ADDR (nextpc);
4591 }
4592 else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
4593 {
4594 /* LDR PC. */
4595 CORE_ADDR base;
4596 int rn, load_pc = 1;
4597
4598 rn = bits (inst1, 0, 3);
4599 base = get_frame_register_unsigned (frame, rn);
4600 if (rn == ARM_PC_REGNUM)
4601 {
4602 base = (base + 4) & ~(CORE_ADDR) 0x3;
4603 if (bit (inst1, 7))
4604 base += bits (inst2, 0, 11);
4605 else
4606 base -= bits (inst2, 0, 11);
4607 }
4608 else if (bit (inst1, 7))
4609 base += bits (inst2, 0, 11);
4610 else if (bit (inst2, 11))
4611 {
4612 if (bit (inst2, 10))
4613 {
4614 if (bit (inst2, 9))
4615 base += bits (inst2, 0, 7);
4616 else
4617 base -= bits (inst2, 0, 7);
4618 }
4619 }
4620 else if ((inst2 & 0x0fc0) == 0x0000)
4621 {
4622 int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
4623 base += get_frame_register_unsigned (frame, rm) << shift;
4624 }
4625 else
4626 /* Reserved. */
4627 load_pc = 0;
4628
4629 if (load_pc)
4630 nextpc = get_frame_memory_unsigned (frame, base, 4);
4631 }
4632 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
4633 {
4634 /* TBB. */
4635 CORE_ADDR tbl_reg, table, offset, length;
4636
4637 tbl_reg = bits (inst1, 0, 3);
4638 if (tbl_reg == 0x0f)
4639 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4640 else
4641 table = get_frame_register_unsigned (frame, tbl_reg);
4642
4643 offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4644 length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
4645 nextpc = pc_val + length;
4646 }
4647 else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
4648 {
4649 /* TBH. */
4650 CORE_ADDR tbl_reg, table, offset, length;
4651
4652 tbl_reg = bits (inst1, 0, 3);
4653 if (tbl_reg == 0x0f)
4654 table = pc + 4; /* Regcache copy of PC isn't right yet. */
4655 else
4656 table = get_frame_register_unsigned (frame, tbl_reg);
4657
4658 offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
4659 length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
4660 nextpc = pc_val + length;
4661 }
4662 }
4663 else if ((inst1 & 0xff00) == 0x4700) /* bx REG, blx REG */
4664 {
4665 if (bits (inst1, 3, 6) == 0x0f)
4666 nextpc = UNMAKE_THUMB_ADDR (pc_val);
4667 else
4668 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4669 }
4670 else if ((inst1 & 0xff87) == 0x4687) /* mov pc, REG */
4671 {
4672 if (bits (inst1, 3, 6) == 0x0f)
4673 nextpc = pc_val;
4674 else
4675 nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
4676
4677 nextpc = MAKE_THUMB_ADDR (nextpc);
4678 }
4679 else if ((inst1 & 0xf500) == 0xb100)
4680 {
4681 /* CBNZ or CBZ. */
4682 int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
4683 ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));
4684
4685 if (bit (inst1, 11) && reg != 0)
4686 nextpc = pc_val + imm;
4687 else if (!bit (inst1, 11) && reg == 0)
4688 nextpc = pc_val + imm;
4689 }
4690 return nextpc;
4691 }
4692
4693 /* Get the raw next address. PC is the current program counter, in
4694 FRAME, which is assumed to be executing in ARM mode.
4695
4696 The value returned has the execution state of the next instruction
4697 encoded in it. Use IS_THUMB_ADDR () to see whether the instruction is
4698 in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
4699 address. */
4700
4701 static CORE_ADDR
4702 arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
4703 {
4704 struct gdbarch *gdbarch = get_frame_arch (frame);
4705 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4706 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
4707 unsigned long pc_val;
4708 unsigned long this_instr;
4709 unsigned long status;
4710 CORE_ADDR nextpc;
4711
4712 pc_val = (unsigned long) pc;
4713 this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
4714
4715 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
4716 nextpc = (CORE_ADDR) (pc_val + 4); /* Default case */
4717
4718 if (bits (this_instr, 28, 31) == INST_NV)
4719 switch (bits (this_instr, 24, 27))
4720 {
4721 case 0xa:
4722 case 0xb:
4723 {
4724 /* Branch with Link and change to Thumb. */
4725 nextpc = BranchDest (pc, this_instr);
4726 nextpc |= bit (this_instr, 24) << 1;
4727 nextpc = MAKE_THUMB_ADDR (nextpc);
4728 break;
4729 }
4730 case 0xc:
4731 case 0xd:
4732 case 0xe:
4733 /* Coprocessor register transfer. */
4734 if (bits (this_instr, 12, 15) == 15)
4735 error (_("Invalid update to pc in instruction"));
4736 break;
4737 }
4738 else if (condition_true (bits (this_instr, 28, 31), status))
4739 {
4740 switch (bits (this_instr, 24, 27))
4741 {
4742 case 0x0:
4743 case 0x1: /* data processing */
4744 case 0x2:
4745 case 0x3:
4746 {
4747 unsigned long operand1, operand2, result = 0;
4748 unsigned long rn;
4749 int c;
4750
4751 if (bits (this_instr, 12, 15) != 15)
4752 break;
4753
4754 if (bits (this_instr, 22, 25) == 0
4755 && bits (this_instr, 4, 7) == 9) /* multiply */
4756 error (_("Invalid update to pc in instruction"));
4757
4758 /* BX <reg>, BLX <reg> */
4759 if (bits (this_instr, 4, 27) == 0x12fff1
4760 || bits (this_instr, 4, 27) == 0x12fff3)
4761 {
4762 rn = bits (this_instr, 0, 3);
4763 nextpc = ((rn == ARM_PC_REGNUM)
4764 ? (pc_val + 8)
4765 : get_frame_register_unsigned (frame, rn));
4766
4767 return nextpc;
4768 }
4769
4770 /* Multiply into PC. */
4771 c = (status & FLAG_C) ? 1 : 0;
4772 rn = bits (this_instr, 16, 19);
4773 operand1 = ((rn == ARM_PC_REGNUM)
4774 ? (pc_val + 8)
4775 : get_frame_register_unsigned (frame, rn));
4776
4777 if (bit (this_instr, 25))
4778 {
4779 unsigned long immval = bits (this_instr, 0, 7);
4780 unsigned long rotate = 2 * bits (this_instr, 8, 11);
4781 operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
4782 & 0xffffffff;
4783 }
4784 else /* operand 2 is a shifted register. */
4785 operand2 = shifted_reg_val (frame, this_instr, c,
4786 pc_val, status);
4787
4788 switch (bits (this_instr, 21, 24))
4789 {
4790 case 0x0: /*and */
4791 result = operand1 & operand2;
4792 break;
4793
4794 case 0x1: /*eor */
4795 result = operand1 ^ operand2;
4796 break;
4797
4798 case 0x2: /*sub */
4799 result = operand1 - operand2;
4800 break;
4801
4802 case 0x3: /*rsb */
4803 result = operand2 - operand1;
4804 break;
4805
4806 case 0x4: /*add */
4807 result = operand1 + operand2;
4808 break;
4809
4810 case 0x5: /*adc */
4811 result = operand1 + operand2 + c;
4812 break;
4813
4814 case 0x6: /*sbc */
4815 result = operand1 - operand2 + c;
4816 break;
4817
4818 case 0x7: /*rsc */
4819 result = operand2 - operand1 + c;
4820 break;
4821
4822 case 0x8:
4823 case 0x9:
4824 case 0xa:
4825 case 0xb: /* tst, teq, cmp, cmn */
4826 result = (unsigned long) nextpc;
4827 break;
4828
4829 case 0xc: /*orr */
4830 result = operand1 | operand2;
4831 break;
4832
4833 case 0xd: /*mov */
4834 /* Always step into a function. */
4835 result = operand2;
4836 break;
4837
4838 case 0xe: /*bic */
4839 result = operand1 & ~operand2;
4840 break;
4841
4842 case 0xf: /*mvn */
4843 result = ~operand2;
4844 break;
4845 }
4846
4847 /* In 26-bit APCS the bottom two bits of the result are
4848 ignored, and we always end up in ARM state. */
4849 if (!arm_apcs_32)
4850 nextpc = arm_addr_bits_remove (gdbarch, result);
4851 else
4852 nextpc = result;
4853
4854 break;
4855 }
4856
4857 case 0x4:
4858 case 0x5: /* data transfer */
4859 case 0x6:
4860 case 0x7:
4861 if (bit (this_instr, 20))
4862 {
4863 /* load */
4864 if (bits (this_instr, 12, 15) == 15)
4865 {
4866 /* rd == pc */
4867 unsigned long rn;
4868 unsigned long base;
4869
4870 if (bit (this_instr, 22))
4871 error (_("Invalid update to pc in instruction"));
4872
4873 /* byte write to PC */
4874 rn = bits (this_instr, 16, 19);
4875 base = ((rn == ARM_PC_REGNUM)
4876 ? (pc_val + 8)
4877 : get_frame_register_unsigned (frame, rn));
4878
4879 if (bit (this_instr, 24))
4880 {
4881 /* pre-indexed */
4882 int c = (status & FLAG_C) ? 1 : 0;
4883 unsigned long offset =
4884 (bit (this_instr, 25)
4885 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
4886 : bits (this_instr, 0, 11));
4887
4888 if (bit (this_instr, 23))
4889 base += offset;
4890 else
4891 base -= offset;
4892 }
4893 nextpc =
4894 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR) base,
4895 4, byte_order);
4896 }
4897 }
4898 break;
4899
4900 case 0x8:
4901 case 0x9: /* block transfer */
4902 if (bit (this_instr, 20))
4903 {
4904 /* LDM */
4905 if (bit (this_instr, 15))
4906 {
4907 /* loading pc */
4908 int offset = 0;
4909 unsigned long rn_val
4910 = get_frame_register_unsigned (frame,
4911 bits (this_instr, 16, 19));
4912
4913 if (bit (this_instr, 23))
4914 {
4915 /* up */
4916 unsigned long reglist = bits (this_instr, 0, 14);
4917 offset = bitcount (reglist) * 4;
4918 if (bit (this_instr, 24)) /* pre */
4919 offset += 4;
4920 }
4921 else if (bit (this_instr, 24))
4922 offset = -4;
4923
4924 nextpc =
4925 (CORE_ADDR) read_memory_unsigned_integer ((CORE_ADDR)
4926 (rn_val + offset),
4927 4, byte_order);
4928 }
4929 }
4930 break;
4931
4932 case 0xb: /* branch & link */
4933 case 0xa: /* branch */
4934 {
4935 nextpc = BranchDest (pc, this_instr);
4936 break;
4937 }
4938
4939 case 0xc:
4940 case 0xd:
4941 case 0xe: /* coproc ops */
4942 break;
4943 case 0xf: /* SWI */
4944 {
4945 struct gdbarch_tdep *tdep;
4946 tdep = gdbarch_tdep (gdbarch);
4947
4948 if (tdep->syscall_next_pc != NULL)
4949 nextpc = tdep->syscall_next_pc (frame);
4950
4951 }
4952 break;
4953
4954 default:
4955 fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
4956 return (pc);
4957 }
4958 }
4959
4960 return nextpc;
4961 }
4962
4963 /* Determine next PC after current instruction executes. Will call either
4964 arm_get_next_pc_raw or thumb_get_next_pc_raw. Error out if infinite
4965 loop is detected. */
4966
4967 CORE_ADDR
4968 arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc)
4969 {
4970 CORE_ADDR nextpc;
4971
4972 if (arm_frame_is_thumb (frame))
4973 {
4974 nextpc = thumb_get_next_pc_raw (frame, pc);
4975 if (nextpc == MAKE_THUMB_ADDR (pc))
4976 error (_("Infinite loop detected"));
4977 }
4978 else
4979 {
4980 nextpc = arm_get_next_pc_raw (frame, pc);
4981 if (nextpc == pc)
4982 error (_("Infinite loop detected"));
4983 }
4984
4985 return nextpc;
4986 }
4987
4988 /* Like insert_single_step_breakpoint, but make sure we use a breakpoint
4989 of the appropriate mode (as encoded in the PC value), even if this
4990 differs from what would be expected according to the symbol tables. */
4991
4992 void
4993 arm_insert_single_step_breakpoint (struct gdbarch *gdbarch,
4994 struct address_space *aspace,
4995 CORE_ADDR pc)
4996 {
4997 struct cleanup *old_chain
4998 = make_cleanup_restore_integer (&arm_override_mode);
4999
5000 arm_override_mode = IS_THUMB_ADDR (pc);
5001 pc = gdbarch_addr_bits_remove (gdbarch, pc);
5002
5003 insert_single_step_breakpoint (gdbarch, aspace, pc);
5004
5005 do_cleanups (old_chain);
5006 }
5007
5008 /* Checks for an atomic sequence of instructions beginning with a LDREX{,B,H,D}
5009 instruction and ending with a STREX{,B,H,D} instruction. If such a sequence
5010 is found, attempt to step through it. A breakpoint is placed at the end of
5011 the sequence. */
5012
5013 static int
5014 thumb_deal_with_atomic_sequence_raw (struct frame_info *frame)
5015 {
5016 struct gdbarch *gdbarch = get_frame_arch (frame);
5017 struct address_space *aspace = get_frame_address_space (frame);
5018 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5019 CORE_ADDR pc = get_frame_pc (frame);
5020 CORE_ADDR breaks[2] = {-1, -1};
5021 CORE_ADDR loc = pc;
5022 unsigned short insn1, insn2;
5023 int insn_count;
5024 int index;
5025 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
5026 const int atomic_sequence_length = 16; /* Instruction sequence length. */
5027 ULONGEST status, itstate;
5028
5029 /* We currently do not support atomic sequences within an IT block. */
5030 status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
5031 itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);
5032 if (itstate & 0x0f)
5033 return 0;
5034
5035 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction. */
5036 insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
5037 loc += 2;
5038 if (thumb_insn_size (insn1) != 4)
5039 return 0;
5040
5041 insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
5042 loc += 2;
5043 if (!((insn1 & 0xfff0) == 0xe850
5044 || ((insn1 & 0xfff0) == 0xe8d0 && (insn2 & 0x00c0) == 0x0040)))
5045 return 0;
5046
5047 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
5048 instructions. */
5049 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
5050 {
5051 insn1 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
5052 loc += 2;
5053
5054 if (thumb_insn_size (insn1) != 4)
5055 {
5056 /* Assume that there is at most one conditional branch in the
5057 atomic sequence. If a conditional branch is found, put a
5058 breakpoint in its destination address. */
5059 if ((insn1 & 0xf000) == 0xd000 && bits (insn1, 8, 11) != 0x0f)
5060 {
5061 if (last_breakpoint > 0)
5062 return 0; /* More than one conditional branch found;
5063 fall back to the standard code. */
5064
5065 breaks[1] = loc + 2 + (sbits (insn1, 0, 7) << 1);
5066 last_breakpoint++;
5067 }
5068
5069 /* We do not support atomic sequences that use any *other*
5070 instructions but conditional branches to change the PC.
5071 Fall back to standard code to avoid losing control of
5072 execution. */
5073 else if (thumb_instruction_changes_pc (insn1))
5074 return 0;
5075 }
5076 else
5077 {
5078 insn2 = read_memory_unsigned_integer (loc, 2, byte_order_for_code);
5079 loc += 2;
5080
5081 /* Assume that there is at most one conditional branch in the
5082 atomic sequence. If a conditional branch is found, put a
5083 breakpoint in its destination address. */
5084 if ((insn1 & 0xf800) == 0xf000
5085 && (insn2 & 0xd000) == 0x8000
5086 && (insn1 & 0x0380) != 0x0380)
5087 {
5088 int sign, j1, j2, imm1, imm2;
5089 unsigned int offset;
5090
5091 sign = sbits (insn1, 10, 10);
5092 imm1 = bits (insn1, 0, 5);
5093 imm2 = bits (insn2, 0, 10);
5094 j1 = bit (insn2, 13);
5095 j2 = bit (insn2, 11);
5096
5097 offset = (sign << 20) + (j2 << 19) + (j1 << 18);
5098 offset += (imm1 << 12) + (imm2 << 1);
5099
5100 if (last_breakpoint > 0)
5101 return 0; /* More than one conditional branch found;
5102 fall back to the standard code. */
5103
5104 breaks[1] = loc + offset;
5105 last_breakpoint++;
5106 }
5107
5108 /* We do not support atomic sequences that use any *other*
5109 instructions but conditional branches to change the PC.
5110 Fall back to standard code to avoid losing control of
5111 execution. */
5112 else if (thumb2_instruction_changes_pc (insn1, insn2))
5113 return 0;
5114
5115 /* If we find a strex{,b,h,d}, we're done. */
5116 if ((insn1 & 0xfff0) == 0xe840
5117 || ((insn1 & 0xfff0) == 0xe8c0 && (insn2 & 0x00c0) == 0x0040))
5118 break;
5119 }
5120 }
5121
5122 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
5123 if (insn_count == atomic_sequence_length)
5124 return 0;
5125
5126 /* Insert a breakpoint right after the end of the atomic sequence. */
5127 breaks[0] = loc;
5128
5129 /* Check for duplicated breakpoints, and also for a breakpoint (the
5130 branch instruction's destination) placed anywhere within the sequence. */
5131 if (last_breakpoint
5132 && (breaks[1] == breaks[0]
5133 || (breaks[1] >= pc && breaks[1] < loc)))
5134 last_breakpoint = 0;
5135
5136 /* Actually insert the breakpoints. */
5137 for (index = 0; index <= last_breakpoint; index++)
5138 arm_insert_single_step_breakpoint (gdbarch, aspace,
5139 MAKE_THUMB_ADDR (breaks[index]));
5140
5141 return 1;
5142 }
5143
5144 static int
5145 arm_deal_with_atomic_sequence_raw (struct frame_info *frame)
5146 {
5147 struct gdbarch *gdbarch = get_frame_arch (frame);
5148 struct address_space *aspace = get_frame_address_space (frame);
5149 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
5150 CORE_ADDR pc = get_frame_pc (frame);
5151 CORE_ADDR breaks[2] = {-1, -1};
5152 CORE_ADDR loc = pc;
5153 unsigned int insn;
5154 int insn_count;
5155 int index;
5156 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
5157 const int atomic_sequence_length = 16; /* Instruction sequence length. */
5158
5159 /* Assume all atomic sequences start with a ldrex{,b,h,d} instruction.
5160 Note that we do not currently support conditionally executed atomic
5161 instructions. */
5162 insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
5163 loc += 4;
5164 if ((insn & 0xff9000f0) != 0xe1900090)
5165 return 0;
5166
5167 /* Assume that no atomic sequence is longer than "atomic_sequence_length"
5168 instructions. */
5169 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
5170 {
5171 insn = read_memory_unsigned_integer (loc, 4, byte_order_for_code);
5172 loc += 4;
5173
5174 /* Assume that there is at most one conditional branch in the atomic
5175 sequence. If a conditional branch is found, put a breakpoint in
5176 its destination address. */
5177 if (bits (insn, 24, 27) == 0xa)
5178 {
5179 if (last_breakpoint > 0)
5180 return 0; /* More than one conditional branch found; fall back
5181 to the standard single-step code. */
5182
5183 breaks[1] = BranchDest (loc - 4, insn);
5184 last_breakpoint++;
5185 }
5186
5187 /* We do not support atomic sequences that use any *other* instructions
5188 but conditional branches to change the PC. Fall back to standard
5189 code to avoid losing control of execution. */
5190 else if (arm_instruction_changes_pc (insn))
5191 return 0;
5192
5193 /* If we find a strex{,b,h,d}, we're done. */
5194 if ((insn & 0xff9000f0) == 0xe1800090)
5195 break;
5196 }
5197
5198 /* If we didn't find the strex{,b,h,d}, we cannot handle the sequence. */
5199 if (insn_count == atomic_sequence_length)
5200 return 0;
5201
5202 /* Insert a breakpoint right after the end of the atomic sequence. */
5203 breaks[0] = loc;
5204
5205 /* Check for duplicated breakpoints, and also for a breakpoint (the
5206 branch instruction's destination) placed anywhere within the sequence. */
5207 if (last_breakpoint
5208 && (breaks[1] == breaks[0]
5209 || (breaks[1] >= pc && breaks[1] < loc)))
5210 last_breakpoint = 0;
5211
5212 /* Actually insert the breakpoints. */
5213 for (index = 0; index <= last_breakpoint; index++)
5214 arm_insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
5215
5216 return 1;
5217 }
5218
5219 int
5220 arm_deal_with_atomic_sequence (struct frame_info *frame)
5221 {
5222 if (arm_frame_is_thumb (frame))
5223 return thumb_deal_with_atomic_sequence_raw (frame);
5224 else
5225 return arm_deal_with_atomic_sequence_raw (frame);
5226 }
5227
5228 /* single_step() is called just before we want to resume the inferior,
5229 if we want to single-step it but there is no hardware or kernel
5230 single-step support. We find the target of the coming instruction
5231 and breakpoint it. */
5232
5233 int
5234 arm_software_single_step (struct frame_info *frame)
5235 {
5236 struct gdbarch *gdbarch = get_frame_arch (frame);
5237 struct address_space *aspace = get_frame_address_space (frame);
5238 CORE_ADDR next_pc;
5239
5240 if (arm_deal_with_atomic_sequence (frame))
5241 return 1;
5242
5243 next_pc = arm_get_next_pc (frame, get_frame_pc (frame));
5244 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc);
5245
5246 return 1;
5247 }
5248
5249 /* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand
5250 the buffer to be NEW_LEN bytes ending at ENDADDR. Return
5251 NULL if an error occurs. BUF is freed. */
5252
5253 static gdb_byte *
5254 extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr,
5255 int old_len, int new_len)
5256 {
5257 gdb_byte *new_buf;
5258 int bytes_to_read = new_len - old_len;
5259
5260 new_buf = xmalloc (new_len);
5261 memcpy (new_buf + bytes_to_read, buf, old_len);
5262 xfree (buf);
5263 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0)
5264 {
5265 xfree (new_buf);
5266 return NULL;
5267 }
5268 return new_buf;
5269 }
5270
5271 /* An IT block is at most the 2-byte IT instruction followed by
5272 four 4-byte instructions. The furthest back we must search to
5273 find an IT block that affects the current instruction is thus
5274 2 + 3 * 4 == 14 bytes. */
5275 #define MAX_IT_BLOCK_PREFIX 14
5276
5277 /* Use a quick scan if there are more than this many bytes of
5278 code. */
5279 #define IT_SCAN_THRESHOLD 32
5280
5281 /* Adjust a breakpoint's address to move breakpoints out of IT blocks.
5282 A breakpoint in an IT block may not be hit, depending on the
5283 condition flags. */
5284 static CORE_ADDR
5285 arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr)
5286 {
5287 gdb_byte *buf;
5288 char map_type;
5289 CORE_ADDR boundary, func_start;
5290 int buf_len;
5291 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch);
5292 int i, any, last_it, last_it_count;
5293
5294 /* If we are using BKPT breakpoints, none of this is necessary. */
5295 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL)
5296 return bpaddr;
5297
5298 /* ARM mode does not have this problem. */
5299 if (!arm_pc_is_thumb (gdbarch, bpaddr))
5300 return bpaddr;
5301
5302 /* We are setting a breakpoint in Thumb code that could potentially
5303 contain an IT block. The first step is to find how much Thumb
5304 code there is; we do not need to read outside of known Thumb
5305 sequences. */
5306 map_type = arm_find_mapping_symbol (bpaddr, &boundary);
5307 if (map_type == 0)
5308 /* Thumb-2 code must have mapping symbols to have a chance. */
5309 return bpaddr;
5310
5311 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr);
5312
5313 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL)
5314 && func_start > boundary)
5315 boundary = func_start;
5316
5317 /* Search for a candidate IT instruction. We have to do some fancy
5318 footwork to distinguish a real IT instruction from the second
5319 half of a 32-bit instruction, but there is no need for that if
5320 there's no candidate. */
5321 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX);
5322 if (buf_len == 0)
5323 /* No room for an IT instruction. */
5324 return bpaddr;
5325
5326 buf = xmalloc (buf_len);
5327 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0)
5328 return bpaddr;
5329 any = 0;
5330 for (i = 0; i < buf_len; i += 2)
5331 {
5332 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5333 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5334 {
5335 any = 1;
5336 break;
5337 }
5338 }
5339 if (any == 0)
5340 {
5341 xfree (buf);
5342 return bpaddr;
5343 }
5344
5345 /* OK, the code bytes before this instruction contain at least one
5346 halfword which resembles an IT instruction. We know that it's
5347 Thumb code, but there are still two possibilities. Either the
5348 halfword really is an IT instruction, or it is the second half of
5349 a 32-bit Thumb instruction. The only way we can tell is to
5350 scan forwards from a known instruction boundary. */
5351 if (bpaddr - boundary > IT_SCAN_THRESHOLD)
5352 {
5353 int definite;
5354
5355 /* There's a lot of code before this instruction. Start with an
5356 optimistic search; it's easy to recognize halfwords that cannot
5357 be the start of a 32-bit instruction, and use that to
5358 lock on to the instruction boundaries. */
5359 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD);
5360 if (buf == NULL)
5361 return bpaddr;
5362 buf_len = IT_SCAN_THRESHOLD;
5363
5364 definite = 0;
5365 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2)
5366 {
5367 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5368 if (thumb_insn_size (inst1) == 2)
5369 {
5370 definite = 1;
5371 break;
5372 }
5373 }
5374
5375 /* At this point, if DEFINITE, BUF[I] is the first place we
5376 are sure that we know the instruction boundaries, and it is far
5377 enough from BPADDR that we could not miss an IT instruction
5378 affecting BPADDR. If ! DEFINITE, give up - start from a
5379 known boundary. */
5380 if (! definite)
5381 {
5382 buf = extend_buffer_earlier (buf, bpaddr, buf_len,
5383 bpaddr - boundary);
5384 if (buf == NULL)
5385 return bpaddr;
5386 buf_len = bpaddr - boundary;
5387 i = 0;
5388 }
5389 }
5390 else
5391 {
5392 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary);
5393 if (buf == NULL)
5394 return bpaddr;
5395 buf_len = bpaddr - boundary;
5396 i = 0;
5397 }
5398
5399 /* Scan forwards. Find the last IT instruction before BPADDR. */
5400 last_it = -1;
5401 last_it_count = 0;
5402 while (i < buf_len)
5403 {
5404 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order);
5405 last_it_count--;
5406 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
5407 {
5408 last_it = i;
5409 if (inst1 & 0x0001)
5410 last_it_count = 4;
5411 else if (inst1 & 0x0002)
5412 last_it_count = 3;
5413 else if (inst1 & 0x0004)
5414 last_it_count = 2;
5415 else
5416 last_it_count = 1;
5417 }
5418 i += thumb_insn_size (inst1);
5419 }
5420
5421 xfree (buf);
5422
5423 if (last_it == -1)
5424 /* There wasn't really an IT instruction after all. */
5425 return bpaddr;
5426
5427 if (last_it_count < 1)
5428 /* It was too far away. */
5429 return bpaddr;
5430
5431 /* This really is a trouble spot. Move the breakpoint to the IT
5432 instruction. */
5433 return bpaddr - buf_len + last_it;
5434 }
5435
5436 /* ARM displaced stepping support.
5437
5438 Generally ARM displaced stepping works as follows:
5439
5440 1. When an instruction is to be single-stepped, it is first decoded by
5441 arm_process_displaced_insn (called from arm_displaced_step_copy_insn).
5442 Depending on the type of instruction, it is then copied to a scratch
5443 location, possibly in a modified form. The copy_* set of functions
5444 performs such modification, as necessary. A breakpoint is placed after
5445 the modified instruction in the scratch space to return control to GDB.
5446 Note in particular that instructions which modify the PC will no longer
5447 do so after modification.
5448
5449 2. The instruction is single-stepped, by setting the PC to the scratch
5450 location address, and resuming. Control returns to GDB when the
5451 breakpoint is hit.
5452
5453 3. A cleanup function (cleanup_*) is called corresponding to the copy_*
5454 function used for the current instruction. This function's job is to
5455 put the CPU/memory state back to what it would have been if the
5456 instruction had been executed unmodified in its original location. */
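/* For example, a B or BL instruction is typically copied to the scratch
   space as a NOP; its cleanup routine then writes the branch target into
   the PC and, for BL, the return address computed from the original
   location (not the scratch location) into LR.  */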
5457
5458 /* NOP instruction (mov r0, r0). */
5459 #define ARM_NOP 0xe1a00000
5460 #define THUMB_NOP 0x4600
5461
5462 /* Helper for register reads for displaced stepping. In particular, this
5463 returns the PC as it would be seen by the instruction at its original
5464 location. */
5465
5466 ULONGEST
5467 displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5468 int regno)
5469 {
5470 ULONGEST ret;
5471 CORE_ADDR from = dsc->insn_addr;
5472
5473 if (regno == ARM_PC_REGNUM)
5474 {
5475 /* Compute pipeline offset:
5476 - When executing an ARM instruction, PC reads as the address of the
5477 current instruction plus 8.
5478 - When executing a Thumb instruction, PC reads as the address of the
5479 current instruction plus 4. */
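/* For example, an ARM instruction at 0x1000 reads the PC here as 0x1008,
while a Thumb instruction at the same address reads it as 0x1004. */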
5480
5481 if (!dsc->is_thumb)
5482 from += 8;
5483 else
5484 from += 4;
5485
5486 if (debug_displaced)
5487 fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
5488 (unsigned long) from);
5489 return (ULONGEST) from;
5490 }
5491 else
5492 {
5493 regcache_cooked_read_unsigned (regs, regno, &ret);
5494 if (debug_displaced)
5495 fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
5496 regno, (unsigned long) ret);
5497 return ret;
5498 }
5499 }
5500
5501 static int
5502 displaced_in_arm_mode (struct regcache *regs)
5503 {
5504 ULONGEST ps;
5505 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5506
5507 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5508
5509 return (ps & t_bit) == 0;
5510 }
5511
5512 /* Write to the PC as from a branch instruction. */
5513
5514 static void
5515 branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5516 ULONGEST val)
5517 {
5518 if (!dsc->is_thumb)
5519 /* Note: If bits 0/1 are set, this branch would be unpredictable for
5520 architecture versions < 6. */
5521 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5522 val & ~(ULONGEST) 0x3);
5523 else
5524 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
5525 val & ~(ULONGEST) 0x1);
5526 }
5527
5528 /* Write to the PC as from a branch-exchange instruction. */
5529
5530 static void
5531 bx_write_pc (struct regcache *regs, ULONGEST val)
5532 {
5533 ULONGEST ps;
5534 ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));
5535
5536 regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);
5537
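/* Bit 0 of the destination selects the instruction set: if it is set,
switch to Thumb; if it is clear (and bit 1 is also clear), switch to
ARM. */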
5538 if ((val & 1) == 1)
5539 {
5540 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
5541 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
5542 }
5543 else if ((val & 2) == 0)
5544 {
5545 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5546 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
5547 }
5548 else
5549 {
5550 /* Unpredictable behaviour. Try to do something sensible (switch to ARM
5551 mode, align dest to 4 bytes). */
5552 warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
5553 regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
5554 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
5555 }
5556 }
5557
5558 /* Write to the PC as if from a load instruction. */
5559
5560 static void
5561 load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5562 ULONGEST val)
5563 {
5564 if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
5565 bx_write_pc (regs, val);
5566 else
5567 branch_write_pc (regs, dsc, val);
5568 }
5569
5570 /* Write to the PC as if from an ALU instruction. */
5571
5572 static void
5573 alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
5574 ULONGEST val)
5575 {
5576 if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
5577 bx_write_pc (regs, val);
5578 else
5579 branch_write_pc (regs, dsc, val);
5580 }
5581
5582 /* Helper for writing to registers for displaced stepping. Writing to the PC
5583 has varying effects depending on the instruction which does the write:
5584 this is controlled by the WRITE_PC argument. */
5585
5586 void
5587 displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
5588 int regno, ULONGEST val, enum pc_write_style write_pc)
5589 {
5590 if (regno == ARM_PC_REGNUM)
5591 {
5592 if (debug_displaced)
5593 fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
5594 (unsigned long) val);
5595 switch (write_pc)
5596 {
5597 case BRANCH_WRITE_PC:
5598 branch_write_pc (regs, dsc, val);
5599 break;
5600
5601 case BX_WRITE_PC:
5602 bx_write_pc (regs, val);
5603 break;
5604
5605 case LOAD_WRITE_PC:
5606 load_write_pc (regs, dsc, val);
5607 break;
5608
5609 case ALU_WRITE_PC:
5610 alu_write_pc (regs, dsc, val);
5611 break;
5612
5613 case CANNOT_WRITE_PC:
5614 warning (_("Instruction wrote to PC in an unexpected way when "
5615 "single-stepping"));
5616 break;
5617
5618 default:
5619 internal_error (__FILE__, __LINE__,
5620 _("Invalid argument to displaced_write_reg"));
5621 }
5622
5623 dsc->wrote_to_pc = 1;
5624 }
5625 else
5626 {
5627 if (debug_displaced)
5628 fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
5629 regno, (unsigned long) val);
5630 regcache_cooked_write_unsigned (regs, regno, val);
5631 }
5632 }
5633
5634 /* This function is used to concisely determine if an instruction INSN
5635 references PC. Register fields of interest in INSN should have the
5636 corresponding fields of BITMASK set to 0b1111. The function
5637 returns 1 if any of these fields in INSN reference the PC
5638 (also 0b1111, r15), else it returns 0. */
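/* For example, insn_references_pc (insn, 0x000f0000ul) tests whether the
register field in bits 16-19 of INSN (usually Rn) is r15. */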
5639
5640 static int
5641 insn_references_pc (uint32_t insn, uint32_t bitmask)
5642 {
5643 uint32_t lowbit = 1;
5644
5645 while (bitmask != 0)
5646 {
5647 uint32_t mask;
5648
5649 for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
5650 ;
5651
5652 if (!lowbit)
5653 break;
5654
5655 mask = lowbit * 0xf;
5656
5657 if ((insn & mask) == mask)
5658 return 1;
5659
5660 bitmask &= ~mask;
5661 }
5662
5663 return 0;
5664 }
5665
5666 /* The simplest copy function. Many instructions have the same effect no
5667 matter what address they are executed at: in those cases, use this. */
5668
5669 static int
5670 arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
5671 const char *iname, struct displaced_step_closure *dsc)
5672 {
5673 if (debug_displaced)
5674 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
5675 "opcode/class '%s' unmodified\n", (unsigned long) insn,
5676 iname);
5677
5678 dsc->modinsn[0] = insn;
5679
5680 return 0;
5681 }
5682
5683 static int
5684 thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
5685 uint16_t insn2, const char *iname,
5686 struct displaced_step_closure *dsc)
5687 {
5688 if (debug_displaced)
5689 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
5690 "opcode/class '%s' unmodified\n", insn1, insn2,
5691 iname);
5692
5693 dsc->modinsn[0] = insn1;
5694 dsc->modinsn[1] = insn2;
5695 dsc->numinsns = 2;
5696
5697 return 0;
5698 }
5699
5700 /* Copy a 16-bit Thumb (Thumb or 16-bit Thumb-2) instruction without any
5701 modification. */
5702 static int
5703 thumb_copy_unmodified_16bit (struct gdbarch *gdbarch, unsigned int insn,
5704 const char *iname,
5705 struct displaced_step_closure *dsc)
5706 {
5707 if (debug_displaced)
5708 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x, "
5709 "opcode/class '%s' unmodified\n", insn,
5710 iname);
5711
5712 dsc->modinsn[0] = insn;
5713
5714 return 0;
5715 }
5716
5717 /* Preload instructions with immediate offset. */
5718
5719 static void
5720 cleanup_preload (struct gdbarch *gdbarch,
5721 struct regcache *regs, struct displaced_step_closure *dsc)
5722 {
5723 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5724 if (!dsc->u.preload.immed)
5725 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
5726 }
5727
5728 static void
5729 install_preload (struct gdbarch *gdbarch, struct regcache *regs,
5730 struct displaced_step_closure *dsc, unsigned int rn)
5731 {
5732 ULONGEST rn_val;
5733 /* Preload instructions:
5734
5735 {pli/pld} [rn, #+/-imm]
5736 ->
5737 {pli/pld} [r0, #+/-imm]. */
5738
5739 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5740 rn_val = displaced_read_reg (regs, dsc, rn);
5741 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5742 dsc->u.preload.immed = 1;
5743
5744 dsc->cleanup = &cleanup_preload;
5745 }
5746
5747 static int
5748 arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
5749 struct displaced_step_closure *dsc)
5750 {
5751 unsigned int rn = bits (insn, 16, 19);
5752
5753 if (!insn_references_pc (insn, 0x000f0000ul))
5754 return arm_copy_unmodified (gdbarch, insn, "preload", dsc);
5755
5756 if (debug_displaced)
5757 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5758 (unsigned long) insn);
5759
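/* Clear the Rn field (bits 16-19) so that the copied instruction uses r0
as its base register; install_preload below places the original Rn value
in r0. */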
5760 dsc->modinsn[0] = insn & 0xfff0ffff;
5761
5762 install_preload (gdbarch, regs, dsc, rn);
5763
5764 return 0;
5765 }
5766
5767 static int
5768 thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
5769 struct regcache *regs, struct displaced_step_closure *dsc)
5770 {
5771 unsigned int rn = bits (insn1, 0, 3);
5772 unsigned int u_bit = bit (insn1, 7);
5773 int imm12 = bits (insn2, 0, 11);
5774 ULONGEST pc_val;
5775
5776 if (rn != ARM_PC_REGNUM)
5777 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
5778
5779 /* The PC is only allowed to be used in PLI (immediate, literal) Encoding T3
5780 and PLD (literal) Encoding T1. */
5781 if (debug_displaced)
5782 fprintf_unfiltered (gdb_stdlog,
5783 "displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
5784 (unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
5785 imm12);
5786
5787 if (!u_bit)
5788 imm12 = -1 * imm12;
5789
5790 /* Rewrite instruction {pli/pld} PC imm12 into:
5791 Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
5792
5793 {pli/pld} [r0, r1]
5794
5795 Cleanup: r0 <- tmp[0], r1 <- tmp[1]. */
5796
5797 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5798 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5799
5800 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
5801
5802 displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
5803 displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
5804 dsc->u.preload.immed = 0;
5805
5806 /* {pli/pld} [r0, r1] */
5807 dsc->modinsn[0] = insn1 & 0xfff0;
5808 dsc->modinsn[1] = 0xf001;
5809 dsc->numinsns = 2;
5810
5811 dsc->cleanup = &cleanup_preload;
5812 return 0;
5813 }
5814
5815 /* Preload instructions with register offset. */
5816
5817 static void
5818 install_preload_reg(struct gdbarch *gdbarch, struct regcache *regs,
5819 struct displaced_step_closure *dsc, unsigned int rn,
5820 unsigned int rm)
5821 {
5822 ULONGEST rn_val, rm_val;
5823
5824 /* Preload register-offset instructions:
5825
5826 {pli/pld} [rn, rm {, shift}]
5827 ->
5828 {pli/pld} [r0, r1 {, shift}]. */
5829
5830 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5831 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
5832 rn_val = displaced_read_reg (regs, dsc, rn);
5833 rm_val = displaced_read_reg (regs, dsc, rm);
5834 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5835 displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);
5836 dsc->u.preload.immed = 0;
5837
5838 dsc->cleanup = &cleanup_preload;
5839 }
5840
5841 static int
5842 arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
5843 struct regcache *regs,
5844 struct displaced_step_closure *dsc)
5845 {
5846 unsigned int rn = bits (insn, 16, 19);
5847 unsigned int rm = bits (insn, 0, 3);
5848
5849
5850 if (!insn_references_pc (insn, 0x000f000ful))
5851 return arm_copy_unmodified (gdbarch, insn, "preload reg", dsc);
5852
5853 if (debug_displaced)
5854 fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
5855 (unsigned long) insn);
5856
5857 dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;
5858
5859 install_preload_reg (gdbarch, regs, dsc, rn, rm);
5860 return 0;
5861 }
5862
5863 /* Copy/cleanup coprocessor load and store instructions. */
5864
5865 static void
5866 cleanup_copro_load_store (struct gdbarch *gdbarch,
5867 struct regcache *regs,
5868 struct displaced_step_closure *dsc)
5869 {
5870 ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);
5871
5872 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
5873
5874 if (dsc->u.ldst.writeback)
5875 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
5876 }
5877
5878 static void
5879 install_copro_load_store (struct gdbarch *gdbarch, struct regcache *regs,
5880 struct displaced_step_closure *dsc,
5881 int writeback, unsigned int rn)
5882 {
5883 ULONGEST rn_val;
5884
5885 /* Coprocessor load/store instructions:
5886
5887 {stc/stc2} [<Rn>, #+/-imm] (and other immediate addressing modes)
5888 ->
5889 {stc/stc2} [r0, #+/-imm].
5890
5891 ldc/ldc2 are handled identically. */
5892
5893 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
5894 rn_val = displaced_read_reg (regs, dsc, rn);
5895 /* PC should be 4-byte aligned. */
5896 rn_val = rn_val & 0xfffffffc;
5897 displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
5898
5899 dsc->u.ldst.writeback = writeback;
5900 dsc->u.ldst.rn = rn;
5901
5902 dsc->cleanup = &cleanup_copro_load_store;
5903 }
5904
5905 static int
5906 arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
5907 struct regcache *regs,
5908 struct displaced_step_closure *dsc)
5909 {
5910 unsigned int rn = bits (insn, 16, 19);
5911
5912 if (!insn_references_pc (insn, 0x000f0000ul))
5913 return arm_copy_unmodified (gdbarch, insn, "copro load/store", dsc);
5914
5915 if (debug_displaced)
5916 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5917 "load/store insn %.8lx\n", (unsigned long) insn);
5918
5919 dsc->modinsn[0] = insn & 0xfff0ffff;
5920
5921 install_copro_load_store (gdbarch, regs, dsc, bit (insn, 25), rn);
5922
5923 return 0;
5924 }
5925
5926 static int
5927 thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
5928 uint16_t insn2, struct regcache *regs,
5929 struct displaced_step_closure *dsc)
5930 {
5931 unsigned int rn = bits (insn1, 0, 3);
5932
5933 if (rn != ARM_PC_REGNUM)
5934 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
5935 "copro load/store", dsc);
5936
5937 if (debug_displaced)
5938 fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
5939 "load/store insn %.4x%.4x\n", insn1, insn2);
5940
5941 dsc->modinsn[0] = insn1 & 0xfff0;
5942 dsc->modinsn[1] = insn2;
5943 dsc->numinsns = 2;
5944
5945 /* This function is called to copy the LDC/LDC2/VLDR instructions, which
5946 do not support writeback, so pass 0 for the writeback flag. */
5947 install_copro_load_store (gdbarch, regs, dsc, 0, rn);
5948
5949 return 0;
5950 }
5951
5952 /* Clean up branch instructions (actually perform the branch, by setting
5953 PC). */
5954
5955 static void
5956 cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
5957 struct displaced_step_closure *dsc)
5958 {
5959 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
5960 int branch_taken = condition_true (dsc->u.branch.cond, status);
5961 enum pc_write_style write_pc = dsc->u.branch.exchange
5962 ? BX_WRITE_PC : BRANCH_WRITE_PC;
5963
5964 if (!branch_taken)
5965 return;
5966
5967 if (dsc->u.branch.link)
5968 {
5969 /* The value of LR should be the address of the insn following the current
5970 one. So as not to confuse logic handling the later insn `bx lr', if the
5971 current insn mode is Thumb, bit 0 of the LR value should be set to 1. */
5972 ULONGEST next_insn_addr = dsc->insn_addr + dsc->insn_size;
5973
5974 if (dsc->is_thumb)
5975 next_insn_addr |= 0x1;
5976
5977 displaced_write_reg (regs, dsc, ARM_LR_REGNUM, next_insn_addr,
5978 CANNOT_WRITE_PC);
5979 }
5980
5981 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
5982 }
5983
5984 /* Copy B/BL/BLX instructions with immediate destinations. */
5985
5986 static void
5987 install_b_bl_blx (struct gdbarch *gdbarch, struct regcache *regs,
5988 struct displaced_step_closure *dsc,
5989 unsigned int cond, int exchange, int link, long offset)
5990 {
5991 /* Implement "BL<cond> <label>" as:
5992
5993 Preparation: cond <- instruction condition
5994 Insn: mov r0, r0 (nop)
5995 Cleanup: if (condition true) { r14 <- pc; pc <- label }.
5996
5997 B<cond> similar, but don't set r14 in cleanup. */
5998
5999 dsc->u.branch.cond = cond;
6000 dsc->u.branch.link = link;
6001 dsc->u.branch.exchange = exchange;
6002
6003 dsc->u.branch.dest = dsc->insn_addr;
6004 if (link && exchange)
6005 /* For BLX, the offset is computed from Align (PC, 4). */
6006 dsc->u.branch.dest = dsc->u.branch.dest & 0xfffffffc;
6007
6008 if (dsc->is_thumb)
6009 dsc->u.branch.dest += 4 + offset;
6010 else
6011 dsc->u.branch.dest += 8 + offset;
6012
6013 dsc->cleanup = &cleanup_branch;
6014 }
6015 static int
6016 arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
6017 struct regcache *regs, struct displaced_step_closure *dsc)
6018 {
6019 unsigned int cond = bits (insn, 28, 31);
6020 int exchange = (cond == 0xf);
6021 int link = exchange || bit (insn, 24);
6022 long offset;
6023
6024 if (debug_displaced)
6025 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
6026 "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
6027 (unsigned long) insn);
6028 if (exchange)
6029 /* For BLX, set bit 0 of the destination. The cleanup_branch function will
6030 then arrange the switch into Thumb mode. */
6031 offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
6032 else
6033 offset = bits (insn, 0, 23) << 2;
6034
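/* OFFSET is a 26-bit signed quantity, so bit 25 is its sign bit; extend
it to the full width of the variable. */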
6035 if (bit (offset, 25))
6036 offset = offset | ~0x3ffffff;
6037
6038 dsc->modinsn[0] = ARM_NOP;
6039
6040 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
6041 return 0;
6042 }
6043
6044 static int
6045 thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
6046 uint16_t insn2, struct regcache *regs,
6047 struct displaced_step_closure *dsc)
6048 {
6049 int link = bit (insn2, 14);
6050 int exchange = link && !bit (insn2, 12);
6051 int cond = INST_AL;
6052 long offset = 0;
6053 int j1 = bit (insn2, 13);
6054 int j2 = bit (insn2, 11);
6055 int s = sbits (insn1, 10, 10);
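/* Per the Thumb-2 branch encodings, I1 = NOT (J1 EOR S) and
I2 = NOT (J2 EOR S). */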
6056 int i1 = !(j1 ^ bit (insn1, 10));
6057 int i2 = !(j2 ^ bit (insn1, 10));
6058
6059 if (!link && !exchange) /* B */
6060 {
6061 offset = (bits (insn2, 0, 10) << 1);
6062 if (bit (insn2, 12)) /* Encoding T4 */
6063 {
6064 offset |= (bits (insn1, 0, 9) << 12)
6065 | (i2 << 22)
6066 | (i1 << 23)
6067 | (s << 24);
6068 cond = INST_AL;
6069 }
6070 else /* Encoding T3 */
6071 {
6072 offset |= (bits (insn1, 0, 5) << 12)
6073 | (j1 << 18)
6074 | (j2 << 19)
6075 | (s << 20);
6076 cond = bits (insn1, 6, 9);
6077 }
6078 }
6079 else
6080 {
6081 offset = (bits (insn1, 0, 9) << 12);
6082 offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
6083 offset |= exchange ?
6084 (bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
6085 }
6086
6087 if (debug_displaced)
6088 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
6089 "%.4x %.4x with offset %.8lx\n",
6090 link ? (exchange) ? "blx" : "bl" : "b",
6091 insn1, insn2, offset);
6092
6093 dsc->modinsn[0] = THUMB_NOP;
6094
6095 install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
6096 return 0;
6097 }
6098
6099 /* Copy B Thumb instructions. */
6100 static int
6101 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
6102 struct displaced_step_closure *dsc)
6103 {
6104 unsigned int cond = 0;
6105 int offset = 0;
6106 unsigned short bit_12_15 = bits (insn, 12, 15);
6107 CORE_ADDR from = dsc->insn_addr;
6108
6109 if (bit_12_15 == 0xd) /* Encoding T1 */
6110 {
6111 /* offset = SignExtend (imm8:0, 32) */
6112 offset = sbits ((insn << 1), 0, 8);
6113 cond = bits (insn, 8, 11);
6114 }
6115 else if (bit_12_15 == 0xe) /* Encoding T2 */
6116 {
6117 offset = sbits ((insn << 1), 0, 11);
6118 cond = INST_AL;
6119 }
6120
6121 if (debug_displaced)
6122 fprintf_unfiltered (gdb_stdlog,
6123 "displaced: copying b immediate insn %.4x "
6124 "with offset %d\n", insn, offset);
6125
6126 dsc->u.branch.cond = cond;
6127 dsc->u.branch.link = 0;
6128 dsc->u.branch.exchange = 0;
6129 dsc->u.branch.dest = from + 4 + offset;
6130
6131 dsc->modinsn[0] = THUMB_NOP;
6132
6133 dsc->cleanup = &cleanup_branch;
6134
6135 return 0;
6136 }
6137
6138 /* Copy BX/BLX with register-specified destinations. */
6139
6140 static void
6141 install_bx_blx_reg (struct gdbarch *gdbarch, struct regcache *regs,
6142 struct displaced_step_closure *dsc, int link,
6143 unsigned int cond, unsigned int rm)
6144 {
6145 /* Implement "{BX,BLX}<cond> <reg>" as:
6146
6147 Preparation: cond <- instruction condition
6148 Insn: mov r0, r0 (nop)
6149 Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.
6150
6151 Don't set r14 in cleanup for BX. */
6152
6153 dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);
6154
6155 dsc->u.branch.cond = cond;
6156 dsc->u.branch.link = link;
6157
6158 dsc->u.branch.exchange = 1;
6159
6160 dsc->cleanup = &cleanup_branch;
6161 }
6162
6163 static int
6164 arm_copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
6165 struct regcache *regs, struct displaced_step_closure *dsc)
6166 {
6167 unsigned int cond = bits (insn, 28, 31);
6168 /* BX: x12xxx1x
6169 BLX: x12xxx3x. */
6170 int link = bit (insn, 5);
6171 unsigned int rm = bits (insn, 0, 3);
6172
6173 if (debug_displaced)
6174 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx",
6175 (unsigned long) insn);
6176
6177 dsc->modinsn[0] = ARM_NOP;
6178
6179 install_bx_blx_reg (gdbarch, regs, dsc, link, cond, rm);
6180 return 0;
6181 }
6182
6183 static int
6184 thumb_copy_bx_blx_reg (struct gdbarch *gdbarch, uint16_t insn,
6185 struct regcache *regs,
6186 struct displaced_step_closure *dsc)
6187 {
6188 int link = bit (insn, 7);
6189 unsigned int rm = bits (insn, 3, 6);
6190
6191 if (debug_displaced)
6192 fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x",
6193 (unsigned short) insn);
6194
6195 dsc->modinsn[0] = THUMB_NOP;
6196
6197 install_bx_blx_reg (gdbarch, regs, dsc, link, INST_AL, rm);
6198
6199 return 0;
6200 }
6201
6202
6203 /* Copy/cleanup arithmetic/logic instruction with immediate RHS. */
6204
6205 static void
6206 cleanup_alu_imm (struct gdbarch *gdbarch,
6207 struct regcache *regs, struct displaced_step_closure *dsc)
6208 {
6209 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6210 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6211 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6212 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6213 }
6214
6215 static int
6216 arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6217 struct displaced_step_closure *dsc)
6218 {
6219 unsigned int rn = bits (insn, 16, 19);
6220 unsigned int rd = bits (insn, 12, 15);
6221 unsigned int op = bits (insn, 21, 24);
6222 int is_mov = (op == 0xd);
6223 ULONGEST rd_val, rn_val;
6224
6225 if (!insn_references_pc (insn, 0x000ff000ul))
6226 return arm_copy_unmodified (gdbarch, insn, "ALU immediate", dsc);
6227
6228 if (debug_displaced)
6229 fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
6230 "%.8lx\n", is_mov ? "move" : "ALU",
6231 (unsigned long) insn);
6232
6233 /* Instruction is of form:
6234
6235 <op><cond> rd, [rn,] #imm
6236
6237 Rewrite as:
6238
6239 Preparation: tmp1, tmp2 <- r0, r1;
6240 r0, r1 <- rd, rn
6241 Insn: <op><cond> r0, r1, #imm
6242 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6243 */
6244
6245 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6246 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6247 rn_val = displaced_read_reg (regs, dsc, rn);
6248 rd_val = displaced_read_reg (regs, dsc, rd);
6249 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6250 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6251 dsc->rd = rd;
6252
6253 if (is_mov)
6254 dsc->modinsn[0] = insn & 0xfff00fff;
6255 else
6256 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;
6257
6258 dsc->cleanup = &cleanup_alu_imm;
6259
6260 return 0;
6261 }
6262
6263 static int
6264 thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
6265 uint16_t insn2, struct regcache *regs,
6266 struct displaced_step_closure *dsc)
6267 {
6268 unsigned int op = bits (insn1, 5, 8);
6269 unsigned int rn, rm, rd;
6270 ULONGEST rd_val, rn_val;
6271
6272 rn = bits (insn1, 0, 3); /* Rn */
6273 rm = bits (insn2, 0, 3); /* Rm */
6274 rd = bits (insn2, 8, 11); /* Rd */
6275
6276 /* This routine is only called for the MOV instruction. */
6277 gdb_assert (op == 0x2 && rn == 0xf);
6278
6279 if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
6280 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
6281
6282 if (debug_displaced)
6283 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
6284 "ALU", insn1, insn2);
6285
6286 /* Instruction is of form:
6287
6288 <op><cond> rd, [rn,] #imm
6289
6290 Rewrite as:
6291
6292 Preparation: tmp1, tmp2 <- r0, r1;
6293 r0, r1 <- rd, rn
6294 Insn: <op><cond> r0, r1, #imm
6295 Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
6296 */
6297
6298 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6299 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6300 rn_val = displaced_read_reg (regs, dsc, rn);
6301 rd_val = displaced_read_reg (regs, dsc, rd);
6302 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6303 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6304 dsc->rd = rd;
6305
6306 dsc->modinsn[0] = insn1;
6307 dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
6308 dsc->numinsns = 2;
6309
6310 dsc->cleanup = &cleanup_alu_imm;
6311
6312 return 0;
6313 }
6314
6315 /* Copy/cleanup arithmetic/logic insns with register RHS. */
6316
6317 static void
6318 cleanup_alu_reg (struct gdbarch *gdbarch,
6319 struct regcache *regs, struct displaced_step_closure *dsc)
6320 {
6321 ULONGEST rd_val;
6322 int i;
6323
6324 rd_val = displaced_read_reg (regs, dsc, 0);
6325
6326 for (i = 0; i < 3; i++)
6327 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6328
6329 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6330 }
6331
6332 static void
6333 install_alu_reg (struct gdbarch *gdbarch, struct regcache *regs,
6334 struct displaced_step_closure *dsc,
6335 unsigned int rd, unsigned int rn, unsigned int rm)
6336 {
6337 ULONGEST rd_val, rn_val, rm_val;
6338
6339 /* Instruction is of form:
6340
6341 <op><cond> rd, [rn,] rm [, <shift>]
6342
6343 Rewrite as:
6344
6345 Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
6346 r0, r1, r2 <- rd, rn, rm
6347 Insn: <op><cond> r0, r1, r2 [, <shift>]
6348 Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
6349 */
6350
6351 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6352 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6353 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6354 rd_val = displaced_read_reg (regs, dsc, rd);
6355 rn_val = displaced_read_reg (regs, dsc, rn);
6356 rm_val = displaced_read_reg (regs, dsc, rm);
6357 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6358 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6359 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6360 dsc->rd = rd;
6361
6362 dsc->cleanup = &cleanup_alu_reg;
6363 }
6364
6365 static int
6366 arm_copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
6367 struct displaced_step_closure *dsc)
6368 {
6369 unsigned int op = bits (insn, 21, 24);
6370 int is_mov = (op == 0xd);
6371
6372 if (!insn_references_pc (insn, 0x000ff00ful))
6373 return arm_copy_unmodified (gdbarch, insn, "ALU reg", dsc);
6374
6375 if (debug_displaced)
6376 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
6377 is_mov ? "move" : "ALU", (unsigned long) insn);
6378
6379 if (is_mov)
6380 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
6381 else
6382 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;
6383
6384 install_alu_reg (gdbarch, regs, dsc, bits (insn, 12, 15), bits (insn, 16, 19),
6385 bits (insn, 0, 3));
6386 return 0;
6387 }
6388
6389 static int
6390 thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
6391 struct regcache *regs,
6392 struct displaced_step_closure *dsc)
6393 {
6394 unsigned rn, rm, rd;
6395
6396 rd = bits (insn, 3, 6);
6397 rn = (bit (insn, 7) << 3) | bits (insn, 0, 2);
6398 rm = 2;
6399
6400 if (rd != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6401 return thumb_copy_unmodified_16bit (gdbarch, insn, "ALU reg", dsc);
6402
6403 if (debug_displaced)
6404 fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x\n",
6405 "ALU", (unsigned short) insn);
6406
6407 dsc->modinsn[0] = ((insn & 0xff00) | 0x08);
6408
6409 install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
6410
6411 return 0;
6412 }
6413
6414 /* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
6415
6416 static void
6417 cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
6418 struct regcache *regs,
6419 struct displaced_step_closure *dsc)
6420 {
6421 ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
6422 int i;
6423
6424 for (i = 0; i < 4; i++)
6425 displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);
6426
6427 displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
6428 }
6429
6430 static void
6431 install_alu_shifted_reg (struct gdbarch *gdbarch, struct regcache *regs,
6432 struct displaced_step_closure *dsc,
6433 unsigned int rd, unsigned int rn, unsigned int rm,
6434 unsigned rs)
6435 {
6436 int i;
6437 ULONGEST rd_val, rn_val, rm_val, rs_val;
6438
6439 /* Instruction is of form:
6440
6441 <op><cond> rd, [rn,] rm, <shift> rs
6442
6443 Rewrite as:
6444
6445 Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
6446 r0, r1, r2, r3 <- rd, rn, rm, rs
6447 Insn: <op><cond> r0, r1, r2, <shift> r3
6448 Cleanup: tmp5 <- r0
6449 r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
6450 rd <- tmp5
6451 */
6452
6453 for (i = 0; i < 4; i++)
6454 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
6455
6456 rd_val = displaced_read_reg (regs, dsc, rd);
6457 rn_val = displaced_read_reg (regs, dsc, rn);
6458 rm_val = displaced_read_reg (regs, dsc, rm);
6459 rs_val = displaced_read_reg (regs, dsc, rs);
6460 displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
6461 displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
6462 displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
6463 displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
6464 dsc->rd = rd;
6465 dsc->cleanup = &cleanup_alu_shifted_reg;
6466 }
6467
6468 static int
6469 arm_copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
6470 struct regcache *regs,
6471 struct displaced_step_closure *dsc)
6472 {
6473 unsigned int op = bits (insn, 21, 24);
6474 int is_mov = (op == 0xd);
6475 unsigned int rd, rn, rm, rs;
6476
6477 if (!insn_references_pc (insn, 0x000fff0ful))
6478 return arm_copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);
6479
6480 if (debug_displaced)
6481 fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
6482 "%.8lx\n", is_mov ? "move" : "ALU",
6483 (unsigned long) insn);
6484
6485 rn = bits (insn, 16, 19);
6486 rm = bits (insn, 0, 3);
6487 rs = bits (insn, 8, 11);
6488 rd = bits (insn, 12, 15);
6489
6490 if (is_mov)
6491 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
6492 else
6493 dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;
6494
6495 install_alu_shifted_reg (gdbarch, regs, dsc, rd, rn, rm, rs);
6496
6497 return 0;
6498 }
6499
6500 /* Clean up load instructions. */
6501
6502 static void
6503 cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
6504 struct displaced_step_closure *dsc)
6505 {
6506 ULONGEST rt_val, rt_val2 = 0, rn_val;
6507
6508 rt_val = displaced_read_reg (regs, dsc, 0);
6509 if (dsc->u.ldst.xfersize == 8)
6510 rt_val2 = displaced_read_reg (regs, dsc, 1);
6511 rn_val = displaced_read_reg (regs, dsc, 2);
6512
6513 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6514 if (dsc->u.ldst.xfersize > 4)
6515 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6516 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6517 if (!dsc->u.ldst.immed)
6518 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6519
6520 /* Handle register writeback. */
6521 if (dsc->u.ldst.writeback)
6522 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6523 /* Put result in right place. */
6524 displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
6525 if (dsc->u.ldst.xfersize == 8)
6526 displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
6527 }
6528
6529 /* Clean up store instructions. */
6530
6531 static void
6532 cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
6533 struct displaced_step_closure *dsc)
6534 {
6535 ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);
6536
6537 displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
6538 if (dsc->u.ldst.xfersize > 4)
6539 displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
6540 displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
6541 if (!dsc->u.ldst.immed)
6542 displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
6543 if (!dsc->u.ldst.restore_r4)
6544 displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);
6545
6546 /* Writeback. */
6547 if (dsc->u.ldst.writeback)
6548 displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
6549 }
6550
6551 /* Copy "extra" load/store instructions. These are halfword/doubleword
6552 transfers, which have a different encoding to byte/word transfers. */
6553
6554 static int
6555 arm_copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unprivileged,
6556 struct regcache *regs, struct displaced_step_closure *dsc)
6557 {
6558 unsigned int op1 = bits (insn, 20, 24);
6559 unsigned int op2 = bits (insn, 5, 6);
6560 unsigned int rt = bits (insn, 12, 15);
6561 unsigned int rn = bits (insn, 16, 19);
6562 unsigned int rm = bits (insn, 0, 3);
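/* The following tables are indexed by the OPCODE value computed below;
LOAD records whether each encoding is a load, and BYTESIZE gives its
transfer size in bytes. */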
6563 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1};
6564 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2};
6565 int immed = (op1 & 0x4) != 0;
6566 int opcode;
6567 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0;
6568
6569 if (!insn_references_pc (insn, 0x000ff00ful))
6570 return arm_copy_unmodified (gdbarch, insn, "extra load/store", dsc);
6571
6572 if (debug_displaced)
6573 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store "
6574 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "",
6575 (unsigned long) insn);
6576
6577 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4;
6578
6579 if (opcode < 0)
6580 internal_error (__FILE__, __LINE__,
6581 _("copy_extra_ld_st: instruction decode error"));
6582
6583 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6584 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
6585 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6586 if (!immed)
6587 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6588
6589 rt_val = displaced_read_reg (regs, dsc, rt);
6590 if (bytesize[opcode] == 8)
6591 rt_val2 = displaced_read_reg (regs, dsc, rt + 1);
6592 rn_val = displaced_read_reg (regs, dsc, rn);
6593 if (!immed)
6594 rm_val = displaced_read_reg (regs, dsc, rm);
6595
6596 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6597 if (bytesize[opcode] == 8)
6598 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC);
6599 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6600 if (!immed)
6601 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6602
6603 dsc->rd = rt;
6604 dsc->u.ldst.xfersize = bytesize[opcode];
6605 dsc->u.ldst.rn = rn;
6606 dsc->u.ldst.immed = immed;
6607 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;
6608 dsc->u.ldst.restore_r4 = 0;
6609
6610 if (immed)
6611 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm]
6612 ->
6613 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */
6614 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6615 else
6616 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm]
6617 ->
6618 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */
6619 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6620
6621 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store;
6622
6623 return 0;
6624 }
6625
6626 /* Copy byte/half word/word loads and stores. */
6627
6628 static void
6629 install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
6630 struct displaced_step_closure *dsc, int load,
6631 int immed, int writeback, int size, int usermode,
6632 int rt, int rm, int rn)
6633 {
6634 ULONGEST rt_val, rn_val, rm_val = 0;
6635
6636 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6637 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6638 if (!immed)
6639 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6640 if (!load)
6641 dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);
6642
6643 rt_val = displaced_read_reg (regs, dsc, rt);
6644 rn_val = displaced_read_reg (regs, dsc, rn);
6645 if (!immed)
6646 rm_val = displaced_read_reg (regs, dsc, rm);
6647
6648 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
6649 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
6650 if (!immed)
6651 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);
6652 dsc->rd = rt;
6653 dsc->u.ldst.xfersize = size;
6654 dsc->u.ldst.rn = rn;
6655 dsc->u.ldst.immed = immed;
6656 dsc->u.ldst.writeback = writeback;
6657
6658 /* To write PC we can do:
6659
6660 Before this sequence of instructions:
6661 r0 is the PC value obtained from displaced_read_reg, so r0 = from + 8;
6662 r2 is the Rn value obtained from displaced_read_reg.
6663
6664 Insn1: push {pc} Write address of STR instruction + offset on stack
6665 Insn2: pop {r4} Read it back from stack, r4 = addr(Insn1) + offset
6666 Insn3: sub r4, r4, pc r4 = addr(Insn1) + offset - pc
6667 = addr(Insn1) + offset - addr(Insn3) - 8
6668 = offset - 16
6669 Insn4: add r4, r4, #8 r4 = offset - 8
6670 Insn5: add r0, r0, r4 r0 = from + 8 + offset - 8
6671 = from + offset
6672 Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])
6673
6674 Otherwise we don't know what value to write for PC, since the offset is
6675 architecture-dependent (sometimes PC+8, sometimes PC+12). More details
6676 of this can be found in Section "Saving from r15" in
6677 http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */
6678
6679 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6680 }
6681
6682
6683 static int
6684 thumb2_copy_load_literal (struct gdbarch *gdbarch, uint16_t insn1,
6685 uint16_t insn2, struct regcache *regs,
6686 struct displaced_step_closure *dsc, int size)
6687 {
6688 unsigned int u_bit = bit (insn1, 7);
6689 unsigned int rt = bits (insn2, 12, 15);
6690 int imm12 = bits (insn2, 0, 11);
6691 ULONGEST pc_val;
6692
6693 if (debug_displaced)
6694 fprintf_unfiltered (gdb_stdlog,
6695 "displaced: copying ldr pc (0x%x) R%d %c imm12 %.4x\n",
6696 (unsigned int) dsc->insn_addr, rt, u_bit ? '+' : '-',
6697 imm12);
6698
6699 if (!u_bit)
6700 imm12 = -1 * imm12;
6701
6702 /* Rewrite instruction LDR Rt imm12 into:
6703
6704 Prepare: tmp[0] <- r0, tmp[2] <- r2, tmp[3] <- r3, r2 <- pc, r3 <- imm12
6705
6706 LDR R0, [R2, R3]
6707
6708 Cleanup: rt <- r0, r0 <- tmp[0], r2 <- tmp[2], r3 <- tmp[3]. */
6709
6710
6711 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
6712 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
6713 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
6714
6715 pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
6716
6717 pc_val = pc_val & 0xfffffffc;
6718
6719 displaced_write_reg (regs, dsc, 2, pc_val, CANNOT_WRITE_PC);
6720 displaced_write_reg (regs, dsc, 3, imm12, CANNOT_WRITE_PC);
6721
6722 dsc->rd = rt;
6723
6724 dsc->u.ldst.xfersize = size;
6725 dsc->u.ldst.immed = 0;
6726 dsc->u.ldst.writeback = 0;
6727 dsc->u.ldst.restore_r4 = 0;
6728
6729 /* LDR R0, [R2, R3] */
6730 dsc->modinsn[0] = 0xf852;
6731 dsc->modinsn[1] = 0x3;
6732 dsc->numinsns = 2;
6733
6734 dsc->cleanup = &cleanup_load;
6735
6736 return 0;
6737 }
6738
6739 static int
6740 thumb2_copy_load_reg_imm (struct gdbarch *gdbarch, uint16_t insn1,
6741 uint16_t insn2, struct regcache *regs,
6742 struct displaced_step_closure *dsc,
6743 int writeback, int immed)
6744 {
6745 unsigned int rt = bits (insn2, 12, 15);
6746 unsigned int rn = bits (insn1, 0, 3);
6747 unsigned int rm = bits (insn2, 0, 3); /* Only valid if !immed. */
6748 /* In LDR (register), there is also a register Rm, which is not allowed to
6749 be PC, so we don't have to check it. */
6750
6751 if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM)
6752 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load",
6753 dsc);
6754
6755 if (debug_displaced)
6756 fprintf_unfiltered (gdb_stdlog,
6757 "displaced: copying ldr r%d [r%d] insn %.4x%.4x\n",
6758 rt, rn, insn1, insn2);
6759
6760 install_load_store (gdbarch, regs, dsc, 1, immed, writeback, 4,
6761 0, rt, rm, rn);
6762
6763 dsc->u.ldst.restore_r4 = 0;
6764
6765 if (immed)
6766 /* ldr[b]<cond> rt, [rn, #imm], etc.
6767 ->
6768 ldr[b]<cond> r0, [r2, #imm]. */
6769 {
6770 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6771 dsc->modinsn[1] = insn2 & 0x0fff;
6772 }
6773 else
6774 /* ldr[b]<cond> rt, [rn, rm], etc.
6775 ->
6776 ldr[b]<cond> r0, [r2, r3]. */
6777 {
6778 dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
6779 dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
6780 }
6781
6782 dsc->numinsns = 2;
6783
6784 return 0;
6785 }
6786
6787
6788 static int
6789 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
6790 struct regcache *regs,
6791 struct displaced_step_closure *dsc,
6792 int load, int size, int usermode)
6793 {
6794 int immed = !bit (insn, 25);
6795 int writeback = (bit (insn, 24) == 0 || bit (insn, 21) != 0);
6796 unsigned int rt = bits (insn, 12, 15);
6797 unsigned int rn = bits (insn, 16, 19);
6798 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */
6799
6800 if (!insn_references_pc (insn, 0x000ff00ful))
6801 return arm_copy_unmodified (gdbarch, insn, "load/store", dsc);
6802
6803 if (debug_displaced)
6804 fprintf_unfiltered (gdb_stdlog,
6805 "displaced: copying %s%s r%d [r%d] insn %.8lx\n",
6806 load ? (size == 1 ? "ldrb" : "ldr")
6807 : (size == 1 ? "strb" : "str"), usermode ? "t" : "",
6808 rt, rn,
6809 (unsigned long) insn);
6810
6811 install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
6812 usermode, rt, rm, rn);
6813
6814 if (load || rt != ARM_PC_REGNUM)
6815 {
6816 dsc->u.ldst.restore_r4 = 0;
6817
6818 if (immed)
6819 /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
6820 ->
6821 {ldr,str}[b]<cond> r0, [r2, #imm]. */
6822 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
6823 else
6824 /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
6825 ->
6826 {ldr,str}[b]<cond> r0, [r2, r3]. */
6827 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
6828 }
6829 else
6830 {
6831 /* We need to use r4 as scratch. Make sure it's restored afterwards. */
6832 dsc->u.ldst.restore_r4 = 1;
6833 dsc->modinsn[0] = 0xe92d8000; /* push {pc} */
6834 dsc->modinsn[1] = 0xe8bd0010; /* pop {r4} */
6835 dsc->modinsn[2] = 0xe044400f; /* sub r4, r4, pc. */
6836 dsc->modinsn[3] = 0xe2844008; /* add r4, r4, #8. */
6837 dsc->modinsn[4] = 0xe0800004; /* add r0, r0, r4. */
6838
6839 /* As above. */
6840 if (immed)
6841 dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
6842 else
6843 dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;
6844
6845 dsc->numinsns = 6;
6846 }
6847
6848 dsc->cleanup = load ? &cleanup_load : &cleanup_store;
6849
6850 return 0;
6851 }
6852
6853 /* Cleanup LDM instructions with fully-populated register list. This is an
6854 unfortunate corner case: it's impossible to implement correctly by modifying
6855 the instruction. The issue is as follows: we have an instruction,
6856
6857 ldm rN, {r0-r15}
6858
6859 which we must rewrite to avoid loading PC. A possible solution would be to
6860 do the load in two halves, something like (with suitable cleanup
6861 afterwards):
6862
6863 mov r8, rN
6864 ldm[id][ab] r8!, {r0-r7}
6865 str r7, <temp>
6866 ldm[id][ab] r8, {r7-r14}
6867 <bkpt>
6868
6869 but at present there's no suitable place for <temp>, since the scratch space
6870 is overwritten before the cleanup routine is called. For now, we simply
6871 emulate the instruction. */
6872
6873 static void
6874 cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
6875 struct displaced_step_closure *dsc)
6876 {
6877 int inc = dsc->u.block.increment;
6878 int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
6879 int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
6880 uint32_t regmask = dsc->u.block.regmask;
6881 int regno = inc ? 0 : 15;
6882 CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
6883 int exception_return = dsc->u.block.load && dsc->u.block.user
6884 && (regmask & 0x8000) != 0;
6885 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6886 int do_transfer = condition_true (dsc->u.block.cond, status);
6887 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6888
6889 if (!do_transfer)
6890 return;
6891
6892 /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
6893 sensible we can do here. Complain loudly. */
6894 if (exception_return)
6895 error (_("Cannot single-step exception return"));
6896
6897 /* We don't handle any stores here for now. */
6898 gdb_assert (dsc->u.block.load != 0);
6899
6900 if (debug_displaced)
6901 fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
6902 "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
6903 dsc->u.block.increment ? "inc" : "dec",
6904 dsc->u.block.before ? "before" : "after");
6905
6906 while (regmask)
6907 {
6908 uint32_t memword;
6909
6910 if (inc)
6911 while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
6912 regno++;
6913 else
6914 while (regno >= 0 && (regmask & (1 << regno)) == 0)
6915 regno--;
6916
6917 xfer_addr += bump_before;
6918
6919 memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
6920 displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);
6921
6922 xfer_addr += bump_after;
6923
6924 regmask &= ~(1 << regno);
6925 }
6926
6927 if (dsc->u.block.writeback)
6928 displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
6929 CANNOT_WRITE_PC);
6930 }
6931
6932 /* Clean up an STM which included the PC in the register list. */
6933
6934 static void
6935 cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs,
6936 struct displaced_step_closure *dsc)
6937 {
6938 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6939 int store_executed = condition_true (dsc->u.block.cond, status);
6940 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask);
6941 CORE_ADDR stm_insn_addr;
6942 uint32_t pc_val;
6943 long offset;
6944 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
6945
6946 /* If condition code fails, there's nothing else to do. */
6947 if (!store_executed)
6948 return;
6949
6950 if (dsc->u.block.increment)
6951 {
6952 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs;
6953
6954 if (dsc->u.block.before)
6955 pc_stored_at += 4;
6956 }
6957 else
6958 {
6959 pc_stored_at = dsc->u.block.xfer_addr;
6960
6961 if (dsc->u.block.before)
6962 pc_stored_at -= 4;
6963 }
6964
6965 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order);
6966 stm_insn_addr = dsc->scratch_base;
6967 offset = pc_val - stm_insn_addr;
6968
6969 if (debug_displaced)
6970 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for "
6971 "STM instruction\n", offset);
6972
6973 /* Rewrite the stored PC to the proper value for the non-displaced original
6974 instruction. */
6975 write_memory_unsigned_integer (pc_stored_at, 4, byte_order,
6976 dsc->insn_addr + offset);
6977 }
6978
6979 /* Clean up an LDM which includes the PC in the register list. We clumped all
6980 the registers in the transferred list into a contiguous range r0...rX (to
6981 avoid loading PC directly and losing control of the debugged program), so we
6982 must undo that here. */
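/* For example, for "ldm r6, {r4, r9, pc}" the modified instruction loads
into r0-r2; this cleanup moves r2 into the PC, r1 into r9 and r0 into r4,
and then restores the clobbered registers r0-r2 from dsc->tmp[]. */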
6983
6984 static void
6985 cleanup_block_load_pc (struct gdbarch *gdbarch,
6986 struct regcache *regs,
6987 struct displaced_step_closure *dsc)
6988 {
6989 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
6990 int load_executed = condition_true (dsc->u.block.cond, status);
6991 unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
6992 unsigned int regs_loaded = bitcount (mask);
6993 unsigned int num_to_shuffle = regs_loaded, clobbered;
6994
6995 /* The method employed here will fail if the register list is fully populated
6996 (we need to avoid loading PC directly). */
6997 gdb_assert (num_to_shuffle < 16);
6998
6999 if (!load_executed)
7000 return;
7001
7002 clobbered = (1 << num_to_shuffle) - 1;
7003
7004 while (num_to_shuffle > 0)
7005 {
7006 if ((mask & (1 << write_reg)) != 0)
7007 {
7008 unsigned int read_reg = num_to_shuffle - 1;
7009
7010 if (read_reg != write_reg)
7011 {
7012 ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
7013 displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
7014 if (debug_displaced)
7015 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
7016 "loaded register r%d to r%d\n"), read_reg,
7017 write_reg);
7018 }
7019 else if (debug_displaced)
7020 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
7021 "r%d already in the right place\n"),
7022 write_reg);
7023
7024 clobbered &= ~(1 << write_reg);
7025
7026 num_to_shuffle--;
7027 }
7028
7029 write_reg--;
7030 }
7031
7032 /* Restore any registers we scribbled over. */
7033 for (write_reg = 0; clobbered != 0; write_reg++)
7034 {
7035 if ((clobbered & (1 << write_reg)) != 0)
7036 {
7037 displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
7038 CANNOT_WRITE_PC);
7039 if (debug_displaced)
7040 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
7041 "clobbered register r%d\n"), write_reg);
7042 clobbered &= ~(1 << write_reg);
7043 }
7044 }
7045
7046 /* Perform register writeback manually. */
7047 if (dsc->u.block.writeback)
7048 {
7049 ULONGEST new_rn_val = dsc->u.block.xfer_addr;
7050
7051 if (dsc->u.block.increment)
7052 new_rn_val += regs_loaded * 4;
7053 else
7054 new_rn_val -= regs_loaded * 4;
7055
7056 displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
7057 CANNOT_WRITE_PC);
7058 }
7059 }
7060
7061 /* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
7062 in user-level code (in particular exception return, ldm rn, {...pc}^). */
7063
7064 static int
7065 arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
7066 struct regcache *regs,
7067 struct displaced_step_closure *dsc)
7068 {
7069 int load = bit (insn, 20);
7070 int user = bit (insn, 22);
7071 int increment = bit (insn, 23);
7072 int before = bit (insn, 24);
7073 int writeback = bit (insn, 21);
7074 int rn = bits (insn, 16, 19);
7075
7076 /* Block transfers which don't mention PC can be run directly
7077 out-of-line. */
7078 if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
7079 return arm_copy_unmodified (gdbarch, insn, "ldm/stm", dsc);
7080
7081 if (rn == ARM_PC_REGNUM)
7082 {
7083 warning (_("displaced: Unpredictable LDM or STM with "
7084 "base register r15"));
7085 return arm_copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
7086 }
7087
7088 if (debug_displaced)
7089 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7090 "%.8lx\n", (unsigned long) insn);
7091
7092 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7093 dsc->u.block.rn = rn;
7094
7095 dsc->u.block.load = load;
7096 dsc->u.block.user = user;
7097 dsc->u.block.increment = increment;
7098 dsc->u.block.before = before;
7099 dsc->u.block.writeback = writeback;
7100 dsc->u.block.cond = bits (insn, 28, 31);
7101
7102 dsc->u.block.regmask = insn & 0xffff;
7103
7104 if (load)
7105 {
7106 if ((insn & 0xffff) == 0xffff)
7107 {
7108 /* LDM with a fully-populated register list. This case is
7109 particularly tricky. Implement for now by fully emulating the
7110 instruction (which might not behave perfectly in all cases, but
7111 these instructions should be rare enough for that not to matter
7112 too much). */
7113 dsc->modinsn[0] = ARM_NOP;
7114
7115 dsc->cleanup = &cleanup_block_load_all;
7116 }
7117 else
7118 {
7119 /* LDM of a list of registers which includes PC. Implement by
7120 rewriting the list of registers to be transferred into a
7121 contiguous chunk r0...rX before doing the transfer, then shuffling
7122 registers into the correct places in the cleanup routine. */
7123 unsigned int regmask = insn & 0xffff;
7124 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7125 unsigned int to = 0, from = 0, i, new_rn;
7126
7127 for (i = 0; i < num_in_list; i++)
7128 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7129
7130 /* Writeback makes things complicated. We need to avoid clobbering
7131 the base register with one of the registers in our modified
7132 register list, but just using a different register can't work in
7133 all cases, e.g.:
7134
7135 ldm r14!, {r0-r13,pc}
7136
7137 which would need to be rewritten as:
7138
7139 ldm rN!, {r0-r14}
7140
7141 but that can't work, because there's no free register for N.
7142
7143 Solve this by turning off the writeback bit, and emulating
7144 writeback manually in the cleanup routine. */
7145
7146 if (writeback)
7147 insn &= ~(1 << 21);
7148
7149 new_regmask = (1 << num_in_list) - 1;
7150
7151 if (debug_displaced)
7152 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7153 "{..., pc}: original reg list %.4x, modified "
7154 "list %.4x\n"), rn, writeback ? "!" : "",
7155 (int) insn & 0xffff, new_regmask);
7156
7157 dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);
7158
7159 dsc->cleanup = &cleanup_block_load_pc;
7160 }
7161 }
7162 else
7163 {
7164 /* STM of a list of registers which includes PC. Run the instruction
7165 as-is, but out of line: this will store the wrong value for the PC,
7166 so we must manually fix up the memory in the cleanup routine.
7167 Doing things this way has the advantage that we can auto-detect
7168 the offset of the PC write (which is architecture-dependent) in
7169 the cleanup routine. */
7170 dsc->modinsn[0] = insn;
7171
7172 dsc->cleanup = &cleanup_block_store_pc;
7173 }
7174
7175 return 0;
7176 }
7177
7178 static int
7179 thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7180 struct regcache *regs,
7181 struct displaced_step_closure *dsc)
7182 {
7183 int rn = bits (insn1, 0, 3);
7184 int load = bit (insn1, 4);
7185 int writeback = bit (insn1, 5);
7186
7187 /* Block transfers which don't mention PC can be run directly
7188 out-of-line. */
7189 if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
7190 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
7191
7192 if (rn == ARM_PC_REGNUM)
7193 {
7194 warning (_("displaced: Unpredictable LDM or STM with "
7195 "base register r15"));
7196 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7197 "unpredictable ldm/stm", dsc);
7198 }
7199
7200 if (debug_displaced)
7201 fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
7202 "%.4x%.4x\n", insn1, insn2);
7203
7204 /* Clear bit 13, since it should always be zero. */
7205 dsc->u.block.regmask = (insn2 & 0xdfff);
7206 dsc->u.block.rn = rn;
7207
7208 dsc->u.block.load = load;
7209 dsc->u.block.user = 0;
7210 dsc->u.block.increment = bit (insn1, 7);
7211 dsc->u.block.before = bit (insn1, 8);
7212 dsc->u.block.writeback = writeback;
7213 dsc->u.block.cond = INST_AL;
7214 dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
7215
7216 if (load)
7217 {
7218 if (dsc->u.block.regmask == 0xffff)
7219 {
7220 /* This branch is impossible to reach. */
7221 gdb_assert (0);
7222 }
7223 else
7224 {
7225 unsigned int regmask = dsc->u.block.regmask;
7226 unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
7227 unsigned int to = 0, from = 0, i, new_rn;
7228
7229 for (i = 0; i < num_in_list; i++)
7230 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
7231
7232 if (writeback)
7233 insn1 &= ~(1 << 5);
7234
7235 new_regmask = (1 << num_in_list) - 1;
7236
7237 if (debug_displaced)
7238 fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
7239 "{..., pc}: original reg list %.4x, modified "
7240 "list %.4x\n"), rn, writeback ? "!" : "",
7241 (int) dsc->u.block.regmask, new_regmask);
7242
7243 dsc->modinsn[0] = insn1;
7244 dsc->modinsn[1] = (new_regmask & 0xffff);
7245 dsc->numinsns = 2;
7246
7247 dsc->cleanup = &cleanup_block_load_pc;
7248 }
7249 }
7250 else
7251 {
7252 dsc->modinsn[0] = insn1;
7253 dsc->modinsn[1] = insn2;
7254 dsc->numinsns = 2;
7255 dsc->cleanup = &cleanup_block_store_pc;
7256 }
7257 return 0;
7258 }
7259
7260 /* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
7261 for Linux, where some SVC instructions must be treated specially. */
7262
7263 static void
7264 cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
7265 struct displaced_step_closure *dsc)
7266 {
7267 CORE_ADDR resume_addr = dsc->insn_addr + dsc->insn_size;
7268
7269 if (debug_displaced)
7270 fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
7271 "%.8lx\n", (unsigned long) resume_addr);
7272
7273 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
7274 }
7275
7276
7277 /* Common copy routine for svc instruction. */
7278
7279 static int
7280 install_svc (struct gdbarch *gdbarch, struct regcache *regs,
7281 struct displaced_step_closure *dsc)
7282 {
7283 /* Preparation: none.
7284 Insn: unmodified svc.
7285 Cleanup: pc <- insn_addr + insn_size. */
7286
7287 /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
7288 instruction. */
7289 dsc->wrote_to_pc = 1;
7290
7291 /* Allow OS-specific code to override SVC handling. */
7292 if (dsc->u.svc.copy_svc_os)
7293 return dsc->u.svc.copy_svc_os (gdbarch, regs, dsc);
7294 else
7295 {
7296 dsc->cleanup = &cleanup_svc;
7297 return 0;
7298 }
7299 }
7300
7301 static int
7302 arm_copy_svc (struct gdbarch *gdbarch, uint32_t insn,
7303 struct regcache *regs, struct displaced_step_closure *dsc)
7304 {
7305
7306 if (debug_displaced)
7307 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
7308 (unsigned long) insn);
7309
7310 dsc->modinsn[0] = insn;
7311
7312 return install_svc (gdbarch, regs, dsc);
7313 }
7314
7315 static int
7316 thumb_copy_svc (struct gdbarch *gdbarch, uint16_t insn,
7317 struct regcache *regs, struct displaced_step_closure *dsc)
7318 {
7319
7320 if (debug_displaced)
7321 fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.4x\n",
7322 insn);
7323
7324 dsc->modinsn[0] = insn;
7325
7326 return install_svc (gdbarch, regs, dsc);
7327 }
7328
7329 /* Copy undefined instructions. */
7330
7331 static int
7332 arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
7333 struct displaced_step_closure *dsc)
7334 {
7335 if (debug_displaced)
7336 fprintf_unfiltered (gdb_stdlog,
7337 "displaced: copying undefined insn %.8lx\n",
7338 (unsigned long) insn);
7339
7340 dsc->modinsn[0] = insn;
7341
7342 return 0;
7343 }
7344
7345 static int
7346 thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
7347 struct displaced_step_closure *dsc)
7348 {
7349
7350 if (debug_displaced)
7351 fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
7352 "%.4x %.4x\n", (unsigned short) insn1,
7353 (unsigned short) insn2);
7354
7355 dsc->modinsn[0] = insn1;
7356 dsc->modinsn[1] = insn2;
7357 dsc->numinsns = 2;
7358
7359 return 0;
7360 }
7361
7362 /* Copy unpredictable instructions. */
7363
7364 static int
7365 arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
7366 struct displaced_step_closure *dsc)
7367 {
7368 if (debug_displaced)
7369 fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
7370 "%.8lx\n", (unsigned long) insn);
7371
7372 dsc->modinsn[0] = insn;
7373
7374 return 0;
7375 }
7376
7377 /* The decode_* functions are instruction decoding helpers. They mostly follow
7378 the presentation in the ARM ARM. */
7379
7380 static int
7381 arm_decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
7382 struct regcache *regs,
7383 struct displaced_step_closure *dsc)
7384 {
7385 unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
7386 unsigned int rn = bits (insn, 16, 19);
7387
7388 if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
7389 return arm_copy_unmodified (gdbarch, insn, "cps", dsc);
7390 else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
7391 return arm_copy_unmodified (gdbarch, insn, "setend", dsc);
7392 else if ((op1 & 0x60) == 0x20)
7393 return arm_copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
7394 else if ((op1 & 0x71) == 0x40)
7395 return arm_copy_unmodified (gdbarch, insn, "neon elt/struct load/store",
7396 dsc);
7397 else if ((op1 & 0x77) == 0x41)
7398 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7399 else if ((op1 & 0x77) == 0x45)
7400 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pli. */
7401 else if ((op1 & 0x77) == 0x51)
7402 {
7403 if (rn != 0xf)
7404 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7405 else
7406 return arm_copy_unpred (gdbarch, insn, dsc);
7407 }
7408 else if ((op1 & 0x77) == 0x55)
7409 return arm_copy_preload (gdbarch, insn, regs, dsc); /* pld/pldw. */
7410 else if (op1 == 0x57)
7411 switch (op2)
7412 {
7413 case 0x1: return arm_copy_unmodified (gdbarch, insn, "clrex", dsc);
7414 case 0x4: return arm_copy_unmodified (gdbarch, insn, "dsb", dsc);
7415 case 0x5: return arm_copy_unmodified (gdbarch, insn, "dmb", dsc);
7416 case 0x6: return arm_copy_unmodified (gdbarch, insn, "isb", dsc);
7417 default: return arm_copy_unpred (gdbarch, insn, dsc);
7418 }
7419 else if ((op1 & 0x63) == 0x43)
7420 return arm_copy_unpred (gdbarch, insn, dsc);
7421 else if ((op2 & 0x1) == 0x0)
7422 switch (op1 & ~0x80)
7423 {
7424 case 0x61:
7425 return arm_copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
7426 case 0x65:
7427 return arm_copy_preload_reg (gdbarch, insn, regs, dsc); /* pli reg. */
7428 case 0x71: case 0x75:
7429 /* pld/pldw reg. */
7430 return arm_copy_preload_reg (gdbarch, insn, regs, dsc);
7431 case 0x63: case 0x67: case 0x73: case 0x77:
7432 return arm_copy_unpred (gdbarch, insn, dsc);
7433 default:
7434 return arm_copy_undef (gdbarch, insn, dsc);
7435 }
7436 else
7437 return arm_copy_undef (gdbarch, insn, dsc); /* Probably unreachable. */
7438 }
7439
7440 static int
7441 arm_decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
7442 struct regcache *regs,
7443 struct displaced_step_closure *dsc)
7444 {
7445 if (bit (insn, 27) == 0)
7446 return arm_decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
7447 /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx. */
7448 else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
7449 {
7450 case 0x0: case 0x2:
7451 return arm_copy_unmodified (gdbarch, insn, "srs", dsc);
7452
7453 case 0x1: case 0x3:
7454 return arm_copy_unmodified (gdbarch, insn, "rfe", dsc);
7455
7456 case 0x4: case 0x5: case 0x6: case 0x7:
7457 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7458
7459 case 0x8:
7460 switch ((insn & 0xe00000) >> 21)
7461 {
7462 case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
7463 /* stc/stc2. */
7464 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7465
7466 case 0x2:
7467 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7468
7469 default:
7470 return arm_copy_undef (gdbarch, insn, dsc);
7471 }
7472
7473 case 0x9:
7474 {
7475 int rn_f = (bits (insn, 16, 19) == 0xf);
7476 switch ((insn & 0xe00000) >> 21)
7477 {
7478 case 0x1: case 0x3:
7479 /* ldc/ldc2 imm (undefined for rn == pc). */
7480 return rn_f ? arm_copy_undef (gdbarch, insn, dsc)
7481 : arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7482
7483 case 0x2:
7484 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7485
7486 case 0x4: case 0x5: case 0x6: case 0x7:
7487 /* ldc/ldc2 lit (undefined for rn != pc). */
7488 return rn_f ? arm_copy_copro_load_store (gdbarch, insn, regs, dsc)
7489 : arm_copy_undef (gdbarch, insn, dsc);
7490
7491 default:
7492 return arm_copy_undef (gdbarch, insn, dsc);
7493 }
7494 }
7495
7496 case 0xa:
7497 return arm_copy_unmodified (gdbarch, insn, "stc/stc2", dsc);
7498
7499 case 0xb:
7500 if (bits (insn, 16, 19) == 0xf)
7501 /* ldc/ldc2 lit. */
7502 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7503 else
7504 return arm_copy_undef (gdbarch, insn, dsc);
7505
7506 case 0xc:
7507 if (bit (insn, 4))
7508 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7509 else
7510 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7511
7512 case 0xd:
7513 if (bit (insn, 4))
7514 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7515 else
7516 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7517
7518 default:
7519 return arm_copy_undef (gdbarch, insn, dsc);
7520 }
7521 }
7522
7523 /* Decode miscellaneous instructions in dp/misc encoding space. */
7524
7525 static int
7526 arm_decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
7527 struct regcache *regs,
7528 struct displaced_step_closure *dsc)
7529 {
7530 unsigned int op2 = bits (insn, 4, 6);
7531 unsigned int op = bits (insn, 21, 22);
7532 unsigned int op1 = bits (insn, 16, 19);
7533
7534 switch (op2)
7535 {
7536 case 0x0:
7537 return arm_copy_unmodified (gdbarch, insn, "mrs/msr", dsc);
7538
7539 case 0x1:
7540 if (op == 0x1) /* bx. */
7541 return arm_copy_bx_blx_reg (gdbarch, insn, regs, dsc);
7542 else if (op == 0x3)
7543 return arm_copy_unmodified (gdbarch, insn, "clz", dsc);
7544 else
7545 return arm_copy_undef (gdbarch, insn, dsc);
7546
7547 case 0x2:
7548 if (op == 0x1)
7549 /* Not really supported. */
7550 return arm_copy_unmodified (gdbarch, insn, "bxj", dsc);
7551 else
7552 return arm_copy_undef (gdbarch, insn, dsc);
7553
7554 case 0x3:
7555 if (op == 0x1)
7556 return arm_copy_bx_blx_reg (gdbarch, insn,
7557 regs, dsc); /* blx register. */
7558 else
7559 return arm_copy_undef (gdbarch, insn, dsc);
7560
7561 case 0x5:
7562 return arm_copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);
7563
7564 case 0x7:
7565 if (op == 0x1)
7566 return arm_copy_unmodified (gdbarch, insn, "bkpt", dsc);
7567 else if (op == 0x3)
7568 /* Not really supported. */
7569 return arm_copy_unmodified (gdbarch, insn, "smc", dsc);
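      /* Fall through to the undefined-instruction case. */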
7570
7571 default:
7572 return arm_copy_undef (gdbarch, insn, dsc);
7573 }
7574 }
7575
7576 static int
7577 arm_decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn,
7578 struct regcache *regs,
7579 struct displaced_step_closure *dsc)
7580 {
7581 if (bit (insn, 25))
7582 switch (bits (insn, 20, 24))
7583 {
7584 case 0x10:
7585 return arm_copy_unmodified (gdbarch, insn, "movw", dsc);
7586
7587 case 0x14:
7588 return arm_copy_unmodified (gdbarch, insn, "movt", dsc);
7589
7590 case 0x12: case 0x16:
7591 return arm_copy_unmodified (gdbarch, insn, "msr imm", dsc);
7592
7593 default:
7594 return arm_copy_alu_imm (gdbarch, insn, regs, dsc);
7595 }
7596 else
7597 {
7598 uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);
7599
7600 if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
7601 return arm_copy_alu_reg (gdbarch, insn, regs, dsc);
7602 else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
7603 return arm_copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
7604 else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
7605 return arm_decode_miscellaneous (gdbarch, insn, regs, dsc);
7606 else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
7607 return arm_copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
7608 else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
7609 return arm_copy_unmodified (gdbarch, insn, "mul/mla", dsc);
7610 else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
7611 return arm_copy_unmodified (gdbarch, insn, "synch", dsc);
7612 else if (op2 == 0xb || (op2 & 0xd) == 0xd)
7613 /* 2nd arg means "unprivileged". */
7614 return arm_copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
7615 dsc);
7616 }
7617
7618 /* Should be unreachable. */
7619 return 1;
7620 }
7621
7622 static int
7623 arm_decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
7624 struct regcache *regs,
7625 struct displaced_step_closure *dsc)
7626 {
7627 int a = bit (insn, 25), b = bit (insn, 4);
7628 uint32_t op1 = bits (insn, 20, 24);
7629 int rn_f = bits (insn, 16, 19) == 0xf;
7630
7631 if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
7632 || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
7633 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 0);
7634 else if ((!a && (op1 & 0x17) == 0x02)
7635 || (a && (op1 & 0x17) == 0x02 && !b))
7636 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 4, 1);
7637 else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
7638 || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
7639 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 0);
7640 else if ((!a && (op1 & 0x17) == 0x03)
7641 || (a && (op1 & 0x17) == 0x03 && !b))
7642 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 4, 1);
7643 else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
7644 || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
7645 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
7646 else if ((!a && (op1 & 0x17) == 0x06)
7647 || (a && (op1 & 0x17) == 0x06 && !b))
7648 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
7649 else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
7650 || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
7651 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
7652 else if ((!a && (op1 & 0x17) == 0x07)
7653 || (a && (op1 & 0x17) == 0x07 && !b))
7654 return arm_copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);
7655
7656 /* Should be unreachable. */
7657 return 1;
7658 }
7659
7660 static int
7661 arm_decode_media (struct gdbarch *gdbarch, uint32_t insn,
7662 struct displaced_step_closure *dsc)
7663 {
7664 switch (bits (insn, 20, 24))
7665 {
7666 case 0x00: case 0x01: case 0x02: case 0x03:
7667 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);
7668
7669 case 0x04: case 0x05: case 0x06: case 0x07:
7670 return arm_copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);
7671
7672 case 0x08: case 0x09: case 0x0a: case 0x0b:
7673 case 0x0c: case 0x0d: case 0x0e: case 0x0f:
7674 return arm_copy_unmodified (gdbarch, insn,
7675 "decode/pack/unpack/saturate/reverse", dsc);
7676
7677 case 0x18:
7678 if (bits (insn, 5, 7) == 0) /* op2. */
7679 {
7680 if (bits (insn, 12, 15) == 0xf)
7681 return arm_copy_unmodified (gdbarch, insn, "usad8", dsc);
7682 else
7683 return arm_copy_unmodified (gdbarch, insn, "usada8", dsc);
7684 }
7685 else
7686 return arm_copy_undef (gdbarch, insn, dsc);
7687
7688 case 0x1a: case 0x1b:
7689 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7690 return arm_copy_unmodified (gdbarch, insn, "sbfx", dsc);
7691 else
7692 return arm_copy_undef (gdbarch, insn, dsc);
7693
7694 case 0x1c: case 0x1d:
7695 if (bits (insn, 5, 6) == 0x0) /* op2[1:0]. */
7696 {
7697 if (bits (insn, 0, 3) == 0xf)
7698 return arm_copy_unmodified (gdbarch, insn, "bfc", dsc);
7699 else
7700 return arm_copy_unmodified (gdbarch, insn, "bfi", dsc);
7701 }
7702 else
7703 return arm_copy_undef (gdbarch, insn, dsc);
7704
7705 case 0x1e: case 0x1f:
7706 if (bits (insn, 5, 6) == 0x2) /* op2[1:0]. */
7707 return arm_copy_unmodified (gdbarch, insn, "ubfx", dsc);
7708 else
7709 return arm_copy_undef (gdbarch, insn, dsc);
7710 }
7711
7712 /* Should be unreachable. */
7713 return 1;
7714 }
7715
7716 static int
7717 arm_decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
7718 struct regcache *regs,
7719 struct displaced_step_closure *dsc)
7720 {
7721 if (bit (insn, 25))
7722 return arm_copy_b_bl_blx (gdbarch, insn, regs, dsc);
7723 else
7724 return arm_copy_block_xfer (gdbarch, insn, regs, dsc);
7725 }
7726
7727 static int
7728 arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
7729 struct regcache *regs,
7730 struct displaced_step_closure *dsc)
7731 {
7732 unsigned int opcode = bits (insn, 20, 24);
7733
7734 switch (opcode)
7735 {
7736 case 0x04: case 0x05: /* VFP/Neon mrrc/mcrr. */
7737 return arm_copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);
7738
7739 case 0x08: case 0x0a: case 0x0c: case 0x0e:
7740 case 0x12: case 0x16:
7741 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);
7742
7743 case 0x09: case 0x0b: case 0x0d: case 0x0f:
7744 case 0x13: case 0x17:
7745 return arm_copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);
7746
7747 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7748 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7749 /* Note: no writeback for these instructions. Bit 25 will always be
7750 zero though (via caller), so the following works OK. */
7751 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7752 }
7753
7754 /* Should be unreachable. */
7755 return 1;
7756 }
7757
7758 /* Decode shifted register instructions. */
7759
7760 static int
7761 thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
7762 uint16_t insn2, struct regcache *regs,
7763 struct displaced_step_closure *dsc)
7764 {
7765 /* The PC is only allowed to be used in the MOV instruction. */
7766
7767 unsigned int op = bits (insn1, 5, 8);
7768 unsigned int rn = bits (insn1, 0, 3);
7769
7770 if (op == 0x2 && rn == 0xf) /* MOV */
7771 return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
7772 else
7773 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7774 "dp (shift reg)", dsc);
7775 }
7776
7777
7778 /* Decode extension register load/store. Exactly the same as
7779 arm_decode_ext_reg_ld_st. */
7780
7781 static int
7782 thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
7783 uint16_t insn2, struct regcache *regs,
7784 struct displaced_step_closure *dsc)
7785 {
7786 unsigned int opcode = bits (insn1, 4, 8);
7787
7788 switch (opcode)
7789 {
7790 case 0x04: case 0x05:
7791 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7792 "vfp/neon vmov", dsc);
7793
7794 case 0x08: case 0x0c: /* 01x00 */
7795 case 0x0a: case 0x0e: /* 01x10 */
7796 case 0x12: case 0x16: /* 10x10 */
7797 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7798 "vfp/neon vstm/vpush", dsc);
7799
7800 case 0x09: case 0x0d: /* 01x01 */
7801 case 0x0b: case 0x0f: /* 01x11 */
7802 case 0x13: case 0x17: /* 10x11 */
7803 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7804 "vfp/neon vldm/vpop", dsc);
7805
7806 case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr. */
7807 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7808 "vstr", dsc);
7809 case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr. */
7810 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
7811 }
7812
7813 /* Should be unreachable. */
7814 return 1;
7815 }
7816
7817 static int
7818 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
7819 struct regcache *regs, struct displaced_step_closure *dsc)
7820 {
7821 unsigned int op1 = bits (insn, 20, 25);
7822 int op = bit (insn, 4);
7823 unsigned int coproc = bits (insn, 8, 11);
7824 unsigned int rn = bits (insn, 16, 19);
7825
7826 if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
7827 return arm_decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
7828 else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
7829 && (coproc & 0xe) != 0xa)
7830 /* stc/stc2. */
7831 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7832 else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
7833 && (coproc & 0xe) != 0xa)
7834 /* ldc/ldc2 imm/lit. */
7835 return arm_copy_copro_load_store (gdbarch, insn, regs, dsc);
7836 else if ((op1 & 0x3e) == 0x00)
7837 return arm_copy_undef (gdbarch, insn, dsc);
7838 else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
7839 return arm_copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
7840 else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
7841 return arm_copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
7842 else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
7843 return arm_copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
7844 else if ((op1 & 0x30) == 0x20 && !op)
7845 {
7846 if ((coproc & 0xe) == 0xa)
7847 return arm_copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
7848 else
7849 return arm_copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
7850 }
7851 else if ((op1 & 0x30) == 0x20 && op)
7852 return arm_copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
7853 else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
7854 return arm_copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
7855 else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
7856 return arm_copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
7857 else if ((op1 & 0x30) == 0x30)
7858 return arm_copy_svc (gdbarch, insn, regs, dsc);
7859 else
7860 return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
7861 }
7862
7863 static int
7864 thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
7865 uint16_t insn2, struct regcache *regs,
7866 struct displaced_step_closure *dsc)
7867 {
7868 unsigned int coproc = bits (insn2, 8, 11);
7869 unsigned int op1 = bits (insn1, 4, 9);
7870 unsigned int bit_5_8 = bits (insn1, 5, 8);
7871 unsigned int bit_9 = bit (insn1, 9);
7872 unsigned int bit_4 = bit (insn1, 4);
7873 unsigned int rn = bits (insn1, 0, 3);
7874
7875 if (bit_9 == 0)
7876 {
7877 if (bit_5_8 == 2)
7878 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7879 "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
7880 dsc);
7881 else if (bit_5_8 == 0) /* UNDEFINED. */
7882 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
7883 else
7884 {
7885 /* coproc is 101x.  SIMD/VFP, extension registers load/store. */
7886 if ((coproc & 0xe) == 0xa)
7887 return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
7888 dsc);
7889 else /* coproc is not 101x. */
7890 {
7891 if (bit_4 == 0) /* STC/STC2. */
7892 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
7893 "stc/stc2", dsc);
7894 else /* LDC/LDC2 {literal, immediate}. */
7895 return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
7896 regs, dsc);
7897 }
7898 }
7899 }
7900 else
7901 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
7902
7903 return 0;
7904 }
7905
7906 static void
7907 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
7908 struct displaced_step_closure *dsc, int rd)
7909 {
7910 /* ADR Rd, #imm
7911
7912 Rewrite as:
7913
7914 Preparation: Rd <- PC
7915 Insn: ADD Rd, #imm
7916 Cleanup: Null.
7917 */
7918
7919 /* Rd <- PC */
7920 int val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
7921 displaced_write_reg (regs, dsc, rd, val, CANNOT_WRITE_PC);
7922 }
7923
7924 static int
7925 thumb_copy_pc_relative_16bit (struct gdbarch *gdbarch, struct regcache *regs,
7926 struct displaced_step_closure *dsc,
7927 int rd, unsigned int imm)
7928 {
7929
7930 /* Encoding T2: ADDS Rd, #imm */
7931 dsc->modinsn[0] = (0x3000 | (rd << 8) | imm);
7932
7933 install_pc_relative (gdbarch, regs, dsc, rd);
7934
7935 return 0;
7936 }
7937
7938 static int
7939 thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
7940 struct regcache *regs,
7941 struct displaced_step_closure *dsc)
7942 {
7943 unsigned int rd = bits (insn, 8, 10);
7944 unsigned int imm8 = bits (insn, 0, 7);
7945
7946 if (debug_displaced)
7947 fprintf_unfiltered (gdb_stdlog,
7948 "displaced: copying thumb adr r%d, #%d insn %.4x\n",
7949 rd, imm8, insn);
7950
7951 return thumb_copy_pc_relative_16bit (gdbarch, regs, dsc, rd, imm8);
7952 }
7953
7954 static int
7955 thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
7956 uint16_t insn2, struct regcache *regs,
7957 struct displaced_step_closure *dsc)
7958 {
7959 unsigned int rd = bits (insn2, 8, 11);
7960 /* Since the immediate has the same encoding in ADR, ADD and SUB, we simply
7961 extract the raw immediate encoding rather than computing the immediate
7962 value.  When generating the ADD or SUB instruction, we can simply OR the
7963 immediate into the encoding. */
7964 unsigned int imm_3_8 = insn2 & 0x70ff;
7965 unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10. */
7966
7967 if (debug_displaced)
7968 fprintf_unfiltered (gdb_stdlog,
7969 "displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
7970 rd, imm_i, imm_3_8, insn1, insn2);
7971
7972 if (bit (insn1, 7)) /* Encoding T2 */
7973 {
7974 /* Encoding T3: SUB Rd, Rd, #imm */
7975 dsc->modinsn[0] = (0xf1a0 | rd | imm_i);
7976 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7977 }
7978 else /* Encoding T3 */
7979 {
7980 /* Encoding T3: ADD Rd, Rd, #imm */
7981 dsc->modinsn[0] = (0xf100 | rd | imm_i);
7982 dsc->modinsn[1] = ((rd << 8) | imm_3_8);
7983 }
7984 dsc->numinsns = 2;
7985
7986 install_pc_relative (gdbarch, regs, dsc, rd);
7987
7988 return 0;
7989 }
7990
7991 static int
7992 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
7993 struct regcache *regs,
7994 struct displaced_step_closure *dsc)
7995 {
7996 unsigned int rt = bits (insn1, 8, 10);
7997 unsigned int pc;
7998 int imm8 = (bits (insn1, 0, 7) << 2);
7999 CORE_ADDR from = dsc->insn_addr;
8000
8001 /* LDR Rd, #imm8
8002
8003 Rewrite as:
8004
8005 Preparation: tmp0 <- R0, tmp2 <- R2, tmp3 <- R3, R2 <- PC, R3 <- #imm8;
8006
8007 Insn: LDR R0, [R2, R3];
8008 Cleanup: R2 <- tmp2, R3 <- tmp3, Rd <- R0, R0 <- tmp0 */
8009
8010 if (debug_displaced)
8011 fprintf_unfiltered (gdb_stdlog,
8012 "displaced: copying thumb ldr r%d [pc #%d]\n"
8013 , rt, imm8);
8014
8015 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
8016 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
8017 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
8018 pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
8019 /* The assembler calculates the required value of the offset from the
8020 Align(PC,4) value of this instruction to the label. */
8021 pc = pc & 0xfffffffc;
8022
8023 displaced_write_reg (regs, dsc, 2, pc, CANNOT_WRITE_PC);
8024 displaced_write_reg (regs, dsc, 3, imm8, CANNOT_WRITE_PC);
8025
8026 dsc->rd = rt;
8027 dsc->u.ldst.xfersize = 4;
8028 dsc->u.ldst.rn = 0;
8029 dsc->u.ldst.immed = 0;
8030 dsc->u.ldst.writeback = 0;
8031 dsc->u.ldst.restore_r4 = 0;
8032
8033 dsc->modinsn[0] = 0x58d0; /* ldr r0, [r2, r3] */
8034
8035 dsc->cleanup = &cleanup_load;
8036
8037 return 0;
8038 }
8039
8040 /* Copy Thumb cbnz/cbz instruction. */
8041
8042 static int
8043 thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
8044 struct regcache *regs,
8045 struct displaced_step_closure *dsc)
8046 {
8047 int non_zero = bit (insn1, 11);
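  /* The branch offset is i:imm5:'0', where i is bit 9 and imm5 is bits 3-7
     of the instruction. */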
8048 unsigned int imm5 = (bit (insn1, 9) << 6) | (bits (insn1, 3, 7) << 1);
8049 CORE_ADDR from = dsc->insn_addr;
8050 int rn = bits (insn1, 0, 2);
8051 int rn_val = displaced_read_reg (regs, dsc, rn);
8052
8053 dsc->u.branch.cond = (rn_val && non_zero) || (!rn_val && !non_zero);
8054 /* CBNZ and CBZ do not affect the condition flags.  If the condition is
8055 true, set it to INST_AL so cleanup_branch knows the branch is taken;
8056 otherwise leave it as is and cleanup_branch will do nothing. */
8057 if (dsc->u.branch.cond)
8058 {
8059 dsc->u.branch.cond = INST_AL;
8060 dsc->u.branch.dest = from + 4 + imm5;
8061 }
8062 else
8063 dsc->u.branch.dest = from + 2;
8064
8065 dsc->u.branch.link = 0;
8066 dsc->u.branch.exchange = 0;
8067
8068 if (debug_displaced)
8069 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s [r%d = 0x%x]"
8070 " insn %.4x to %.8lx\n", non_zero ? "cbnz" : "cbz",
8071 rn, rn_val, insn1, dsc->u.branch.dest);
8072
8073 dsc->modinsn[0] = THUMB_NOP;
8074
8075 dsc->cleanup = &cleanup_branch;
8076 return 0;
8077 }
8078
8079 /* Copy Table Branch Byte/Halfword (TBB/TBH). */
8080 static int
8081 thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
8082 uint16_t insn2, struct regcache *regs,
8083 struct displaced_step_closure *dsc)
8084 {
8085 ULONGEST rn_val, rm_val;
8086 int is_tbh = bit (insn2, 4);
8087 CORE_ADDR halfwords = 0;
8088 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8089
8090 rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
8091 rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
8092
8093 if (is_tbh)
8094 {
8095 gdb_byte buf[2];
8096
8097 target_read_memory (rn_val + 2 * rm_val, buf, 2);
8098 halfwords = extract_unsigned_integer (buf, 2, byte_order);
8099 }
8100 else
8101 {
8102 gdb_byte buf[1];
8103
8104 target_read_memory (rn_val + rm_val, buf, 1);
8105 halfwords = extract_unsigned_integer (buf, 1, byte_order);
8106 }
8107
8108 if (debug_displaced)
8109 fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
8110 " halfwords 0x%x\n", is_tbh ? "tbh" : "tbb",
8111 (unsigned int) rn_val, (unsigned int) rm_val,
8112 (unsigned int) halfwords);
8113
8114 dsc->u.branch.cond = INST_AL;
8115 dsc->u.branch.link = 0;
8116 dsc->u.branch.exchange = 0;
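  /* The branch target is the address of the TBB/TBH instruction plus 4
     (the Thumb PC offset) plus twice the table entry read above. */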
8117 dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
8118
8119 dsc->cleanup = &cleanup_branch;
8120
8121 return 0;
8122 }
8123
8124 static void
8125 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
8126 struct displaced_step_closure *dsc)
8127 {
8128 /* PC <- r7 */
8129 int val = displaced_read_reg (regs, dsc, 7);
8130 displaced_write_reg (regs, dsc, ARM_PC_REGNUM, val, BX_WRITE_PC);
8131
8132 /* r7 <- r8 */
8133 val = displaced_read_reg (regs, dsc, 8);
8134 displaced_write_reg (regs, dsc, 7, val, CANNOT_WRITE_PC);
8135
8136 /* r8 <- tmp[0] */
8137 displaced_write_reg (regs, dsc, 8, dsc->tmp[0], CANNOT_WRITE_PC);
8138
8139 }
8140
8141 static int
8142 thumb_copy_pop_pc_16bit (struct gdbarch *gdbarch, unsigned short insn1,
8143 struct regcache *regs,
8144 struct displaced_step_closure *dsc)
8145 {
8146 dsc->u.block.regmask = insn1 & 0x00ff;
8147
8148 /* Rewrite instruction: POP {rX, rY, ..., rZ, PC}
8149 to:
8150
8151 (1) register list is full, that is, r0-r7 are used.
8152 Prepare: tmp[0] <- r8
8153
8154 POP {r0, r1, ...., r6, r7}; remove PC from reglist
8155 MOV r8, r7; Move value of r7 to r8;
8156 POP {r7}; Load the saved PC value into r7.
8157
8158 Cleanup: PC <- r7, r7 <- r8, r8 <-tmp[0]
8159
8160 (2) register list is not full, supposing there are N registers in
8161 register list (except PC, 0 <= N <= 7).
8162 Prepare: for each i, 0 - N, tmp[i] <- ri.
8163
8164 POP {r0, r1, ...., rN};
8165
8166 Cleanup: Set registers in original reglist from r0 - rN. Restore r0 - rN
8167 from tmp[] properly.
8168 */
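  /* As a concrete illustration (hypothetical register list): POP {r0, r2, pc}
     has two registers besides PC, so it is rewritten as POP {r0, r1, r2};
     r0 and r1 receive the values destined for r0 and r2, r2 receives the new
     PC value, and the cleanup routine moves each value into place. */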
8169 if (debug_displaced)
8170 fprintf_unfiltered (gdb_stdlog,
8171 "displaced: copying thumb pop {%.8x, pc} insn %.4x\n",
8172 dsc->u.block.regmask, insn1);
8173
8174 if (dsc->u.block.regmask == 0xff)
8175 {
8176 dsc->tmp[0] = displaced_read_reg (regs, dsc, 8);
8177
8178 dsc->modinsn[0] = (insn1 & 0xfeff); /* POP {r0,r1,...,r6, r7} */
8179 dsc->modinsn[1] = 0x46b8; /* MOV r8, r7 */
8180 dsc->modinsn[2] = 0xbc80; /* POP {r7} */
8181
8182 dsc->numinsns = 3;
8183 dsc->cleanup = &cleanup_pop_pc_16bit_all;
8184 }
8185 else
8186 {
8187 unsigned int num_in_list = bitcount (dsc->u.block.regmask);
8188 unsigned int new_regmask, bit = 1;
8189 unsigned int to = 0, from = 0, i, new_rn;
8190
8191 for (i = 0; i < num_in_list + 1; i++)
8192 dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
8193
8194 new_regmask = (1 << (num_in_list + 1)) - 1;
8195
8196 if (debug_displaced)
8197 fprintf_unfiltered (gdb_stdlog, _("displaced: POP "
8198 "{..., pc}: original reg list %.4x,"
8199 " modified list %.4x\n"),
8200 (int) dsc->u.block.regmask, new_regmask);
8201
8202 dsc->u.block.regmask |= 0x8000;
8203 dsc->u.block.writeback = 0;
8204 dsc->u.block.cond = INST_AL;
8205
8206 dsc->modinsn[0] = (insn1 & ~0x1ff) | (new_regmask & 0xff);
8207
8208 dsc->cleanup = &cleanup_block_load_pc;
8209 }
8210
8211 return 0;
8212 }
8213
8214 static void
8215 thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
8216 struct regcache *regs,
8217 struct displaced_step_closure *dsc)
8218 {
8219 unsigned short op_bit_12_15 = bits (insn1, 12, 15);
8220 unsigned short op_bit_10_11 = bits (insn1, 10, 11);
8221 int err = 0;
8222
8223 /* 16-bit thumb instructions. */
8224 switch (op_bit_12_15)
8225 {
8226 /* Shift (immediate), add, subtract, move and compare. */
8227 case 0: case 1: case 2: case 3:
8228 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
8229 "shift/add/sub/mov/cmp",
8230 dsc);
8231 break;
8232 case 4:
8233 switch (op_bit_10_11)
8234 {
8235 case 0: /* Data-processing */
8236 err = thumb_copy_unmodified_16bit (gdbarch, insn1,
8237 "data-processing",
8238 dsc);
8239 break;
8240 case 1: /* Special data instructions and branch and exchange. */
8241 {
8242 unsigned short op = bits (insn1, 7, 9);
8243 if (op == 6 || op == 7) /* BX or BLX */
8244 err = thumb_copy_bx_blx_reg (gdbarch, insn1, regs, dsc);
8245 else if (bits (insn1, 6, 7) != 0) /* ADD/MOV/CMP high registers. */
8246 err = thumb_copy_alu_reg (gdbarch, insn1, regs, dsc);
8247 else
8248 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "special data",
8249 dsc);
8250 }
8251 break;
8252 default: /* LDR (literal) */
8253 err = thumb_copy_16bit_ldr_literal (gdbarch, insn1, regs, dsc);
8254 }
8255 break;
8256 case 5: case 6: case 7: case 8: case 9: /* Load/Store single data item */
8257 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldr/str", dsc);
8258 break;
8259 case 10:
8260 if (op_bit_10_11 < 2) /* Generate PC-relative address */
8261 err = thumb_decode_pc_relative_16bit (gdbarch, insn1, regs, dsc);
8262 else /* Generate SP-relative address */
8263 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "sp-relative", dsc);
8264 break;
8265 case 11: /* Misc 16-bit instructions */
8266 {
8267 switch (bits (insn1, 8, 11))
8268 {
8269 case 1: case 3: case 9: case 11: /* CBNZ, CBZ */
8270 err = thumb_copy_cbnz_cbz (gdbarch, insn1, regs, dsc);
8271 break;
8272 case 12: case 13: /* POP */
8273 if (bit (insn1, 8)) /* PC is in register list. */
8274 err = thumb_copy_pop_pc_16bit (gdbarch, insn1, regs, dsc);
8275 else
8276 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "pop", dsc);
8277 break;
8278 case 15: /* If-Then, and hints */
8279 if (bits (insn1, 0, 3))
8280 /* If-Then makes up to four following instructions conditional.
8281 The IT instruction itself is not conditional, so handle it as
8282 an ordinary unmodified instruction. */
8283 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "If-Then",
8284 dsc);
8285 else
8286 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "hints", dsc);
8287 break;
8288 default:
8289 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "misc", dsc);
8290 }
8291 }
8292 break;
8293 case 12:
8294 if (op_bit_10_11 < 2) /* Store multiple registers */
8295 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "stm", dsc);
8296 else /* Load multiple registers */
8297 err = thumb_copy_unmodified_16bit (gdbarch, insn1, "ldm", dsc);
8298 break;
8299 case 13: /* Conditional branch and supervisor call */
8300 if (bits (insn1, 9, 11) != 7) /* conditional branch */
8301 err = thumb_copy_b (gdbarch, insn1, dsc);
8302 else
8303 err = thumb_copy_svc (gdbarch, insn1, regs, dsc);
8304 break;
8305 case 14: /* Unconditional branch */
8306 err = thumb_copy_b (gdbarch, insn1, dsc);
8307 break;
8308 default:
8309 err = 1;
8310 }
8311
8312 if (err)
8313 internal_error (__FILE__, __LINE__,
8314 _("thumb_process_displaced_16bit_insn: Instruction decode error"));
8315 }
8316
8317 static int
8318 decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
8319 uint16_t insn1, uint16_t insn2,
8320 struct regcache *regs,
8321 struct displaced_step_closure *dsc)
8322 {
8323 int rt = bits (insn2, 12, 15);
8324 int rn = bits (insn1, 0, 3);
8325 int op1 = bits (insn1, 7, 8);
8326 int err = 0;
8327
8328 switch (bits (insn1, 5, 6))
8329 {
8330 case 0: /* Load byte and memory hints */
8331 if (rt == 0xf) /* PLD/PLI */
8332 {
8333 if (rn == 0xf)
8334 /* PLD literal or Encoding T3 of PLI(immediate, literal). */
8335 return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
8336 else
8337 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8338 "pli/pld", dsc);
8339 }
8340 else
8341 {
8342 if (rn == 0xf) /* LDRB/LDRSB (literal) */
8343 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8344 1);
8345 else
8346 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8347 "ldrb{reg, immediate}/ldrbt",
8348 dsc);
8349 }
8350
8351 break;
8352 case 1: /* Load halfword and memory hints. */
8353 if (rt == 0xf) /* PLD{W} and Unalloc memory hint. */
8354 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8355 "pld/unalloc memhint", dsc);
8356 else
8357 {
8358 if (rn == 0xf)
8359 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc,
8360 2);
8361 else
8362 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8363 "ldrh/ldrht", dsc);
8364 }
8365 break;
8366 case 2: /* Load word */
8367 {
8368 int insn2_bit_8_11 = bits (insn2, 8, 11);
8369
8370 if (rn == 0xf)
8371 return thumb2_copy_load_literal (gdbarch, insn1, insn2, regs, dsc, 4);
8372 else if (op1 == 0x1) /* Encoding T3 */
8373 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs, dsc,
8374 0, 1);
8375 else /* op1 == 0x0 */
8376 {
8377 if (insn2_bit_8_11 == 0xc || (insn2_bit_8_11 & 0x9) == 0x9)
8378 /* LDR (immediate) */
8379 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8380 dsc, bit (insn2, 8), 1);
8381 else if (insn2_bit_8_11 == 0xe) /* LDRT */
8382 return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8383 "ldrt", dsc);
8384 else
8385 /* LDR (register) */
8386 return thumb2_copy_load_reg_imm (gdbarch, insn1, insn2, regs,
8387 dsc, 0, 0);
8388 }
8389 break;
8390 }
8391 default:
8392 return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
8393 break;
8394 }
8395 return 0;
8396 }
8397
8398 static void
8399 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
8400 uint16_t insn2, struct regcache *regs,
8401 struct displaced_step_closure *dsc)
8402 {
8403 int err = 0;
8404 unsigned short op = bit (insn2, 15);
8405 unsigned int op1 = bits (insn1, 11, 12);
8406
8407 switch (op1)
8408 {
8409 case 1:
8410 {
8411 switch (bits (insn1, 9, 10))
8412 {
8413 case 0:
8414 if (bit (insn1, 6))
8415 {
8416 /* Load/store {dual, exclusive}, table branch. */
8417 if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
8418 && bits (insn2, 5, 7) == 0)
8419 err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
8420 dsc);
8421 else
8422 /* The PC is not allowed to be used in load/store {dual, exclusive}
8423 instructions. */
8424 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8425 "load/store dual/ex", dsc);
8426 }
8427 else /* load/store multiple */
8428 {
8429 switch (bits (insn1, 7, 8))
8430 {
8431 case 0: case 3: /* SRS, RFE */
8432 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8433 "srs/rfe", dsc);
8434 break;
8435 case 1: case 2: /* LDM/STM/PUSH/POP */
8436 err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
8437 break;
8438 }
8439 }
8440 break;
8441
8442 case 1:
8443 /* Data-processing (shift register). */
8444 err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
8445 dsc);
8446 break;
8447 default: /* Coprocessor instructions. */
8448 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8449 break;
8450 }
8451 break;
8452 }
8453 case 2: /* op1 = 2 */
8454 if (op) /* Branch and misc control. */
8455 {
8456 if (bit (insn2, 14) /* BLX/BL */
8457 || bit (insn2, 12) /* Unconditional branch */
8458 || (bits (insn1, 7, 9) != 0x7)) /* Conditional branch */
8459 err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
8460 else
8461 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8462 "misc ctrl", dsc);
8463 }
8464 else
8465 {
8466 if (bit (insn1, 9)) /* Data processing (plain binary imm). */
8467 {
8468 int op = bits (insn1, 4, 8);
8469 int rn = bits (insn1, 0, 3);
8470 if ((op == 0 || op == 0xa) && rn == 0xf)
8471 err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
8472 regs, dsc);
8473 else
8474 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8475 "dp/pb", dsc);
8476 }
8477 else /* Data processing (modified immediate) */
8478 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8479 "dp/mi", dsc);
8480 }
8481 break;
8482 case 3: /* op1 = 3 */
8483 switch (bits (insn1, 9, 10))
8484 {
8485 case 0:
8486 if (bit (insn1, 4))
8487 err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
8488 regs, dsc);
8489 else /* NEON Load/Store and Store single data item */
8490 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8491 "neon elt/struct load/store",
8492 dsc);
8493 break;
8494 case 1: /* op1 = 3, bits (9, 10) == 1 */
8495 switch (bits (insn1, 7, 8))
8496 {
8497 case 0: case 1: /* Data processing (register) */
8498 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8499 "dp(reg)", dsc);
8500 break;
8501 case 2: /* Multiply and absolute difference */
8502 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8503 "mul/mua/diff", dsc);
8504 break;
8505 case 3: /* Long multiply and divide */
8506 err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
8507 "lmul/lmua", dsc);
8508 break;
8509 }
8510 break;
8511 default: /* Coprocessor instructions */
8512 err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
8513 break;
8514 }
8515 break;
8516 default:
8517 err = 1;
8518 }
8519
8520 if (err)
8521 internal_error (__FILE__, __LINE__,
8522 _("thumb_process_displaced_32bit_insn: Instruction decode error"));
8523
8524 }
8525
8526 static void
8527 thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8528 CORE_ADDR to, struct regcache *regs,
8529 struct displaced_step_closure *dsc)
8530 {
8531 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8532 uint16_t insn1
8533 = read_memory_unsigned_integer (from, 2, byte_order_for_code);
8534
8535 if (debug_displaced)
8536 fprintf_unfiltered (gdb_stdlog, "displaced: process thumb insn %.4x "
8537 "at %.8lx\n", insn1, (unsigned long) from);
8538
8539 dsc->is_thumb = 1;
8540 dsc->insn_size = thumb_insn_size (insn1);
8541 if (thumb_insn_size (insn1) == 4)
8542 {
8543 uint16_t insn2
8544 = read_memory_unsigned_integer (from + 2, 2, byte_order_for_code);
8545 thumb_process_displaced_32bit_insn (gdbarch, insn1, insn2, regs, dsc);
8546 }
8547 else
8548 thumb_process_displaced_16bit_insn (gdbarch, insn1, regs, dsc);
8549 }
8550
8551 void
8552 arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
8553 CORE_ADDR to, struct regcache *regs,
8554 struct displaced_step_closure *dsc)
8555 {
8556 int err = 0;
8557 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8558 uint32_t insn;
8559
8560 /* Most displaced instructions use a 1-instruction scratch space, so set this
8561 here and override below if/when necessary. */
8562 dsc->numinsns = 1;
8563 dsc->insn_addr = from;
8564 dsc->scratch_base = to;
8565 dsc->cleanup = NULL;
8566 dsc->wrote_to_pc = 0;
8567
8568 if (!displaced_in_arm_mode (regs))
8569 return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);
8570
8571 dsc->is_thumb = 0;
8572 dsc->insn_size = 4;
8573 insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
8574 if (debug_displaced)
8575 fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
8576 "at %.8lx\n", (unsigned long) insn,
8577 (unsigned long) from);
8578
8579 if ((insn & 0xf0000000) == 0xf0000000)
8580 err = arm_decode_unconditional (gdbarch, insn, regs, dsc);
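  /* Otherwise dispatch on bits 27-25 of the instruction (placed in bits 3-1
     of the switch value) together with bit 4 (placed in bit 0), which
     distinguishes the major ARM encoding groups. */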
8581 else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
8582 {
8583 case 0x0: case 0x1: case 0x2: case 0x3:
8584 err = arm_decode_dp_misc (gdbarch, insn, regs, dsc);
8585 break;
8586
8587 case 0x4: case 0x5: case 0x6:
8588 err = arm_decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
8589 break;
8590
8591 case 0x7:
8592 err = arm_decode_media (gdbarch, insn, dsc);
8593 break;
8594
8595 case 0x8: case 0x9: case 0xa: case 0xb:
8596 err = arm_decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
8597 break;
8598
8599 case 0xc: case 0xd: case 0xe: case 0xf:
8600 err = arm_decode_svc_copro (gdbarch, insn, to, regs, dsc);
8601 break;
8602 }
8603
8604 if (err)
8605 internal_error (__FILE__, __LINE__,
8606 _("arm_process_displaced_insn: Instruction decode error"));
8607 }
8608
8609 /* Actually set up the scratch space for a displaced instruction. */
8610
8611 void
8612 arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
8613 CORE_ADDR to, struct displaced_step_closure *dsc)
8614 {
8615 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8616 unsigned int i, len, offset;
8617 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8618 int size = dsc->is_thumb ? 2 : 4;
8619 const gdb_byte *bkp_insn;
8620
8621 offset = 0;
8622 /* Poke modified instruction(s). */
8623 for (i = 0; i < dsc->numinsns; i++)
8624 {
8625 if (debug_displaced)
8626 {
8627 fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
8628 if (size == 4)
8629 fprintf_unfiltered (gdb_stdlog, "%.8lx",
8630 dsc->modinsn[i]);
8631 else if (size == 2)
8632 fprintf_unfiltered (gdb_stdlog, "%.4x",
8633 (unsigned short)dsc->modinsn[i]);
8634
8635 fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
8636 (unsigned long) to + offset);
8637
8638 }
8639 write_memory_unsigned_integer (to + offset, size,
8640 byte_order_for_code,
8641 dsc->modinsn[i]);
8642 offset += size;
8643 }
8644
8645 /* Choose the correct breakpoint instruction. */
8646 if (dsc->is_thumb)
8647 {
8648 bkp_insn = tdep->thumb_breakpoint;
8649 len = tdep->thumb_breakpoint_size;
8650 }
8651 else
8652 {
8653 bkp_insn = tdep->arm_breakpoint;
8654 len = tdep->arm_breakpoint_size;
8655 }
8656
8657 /* Put breakpoint afterwards. */
8658 write_memory (to + offset, bkp_insn, len);
8659
8660 if (debug_displaced)
8661 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
8662 paddress (gdbarch, from), paddress (gdbarch, to));
8663 }
8664
8665 /* Entry point for copying an instruction into scratch space for displaced
8666 stepping. */
8667
8668 struct displaced_step_closure *
8669 arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
8670 CORE_ADDR from, CORE_ADDR to,
8671 struct regcache *regs)
8672 {
8673 struct displaced_step_closure *dsc
8674 = xmalloc (sizeof (struct displaced_step_closure));
8675 arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
8676 arm_displaced_init_closure (gdbarch, from, to, dsc);
8677
8678 return dsc;
8679 }
8680
8681 /* Entry point for cleaning things up after a displaced instruction has been
8682 single-stepped. */
8683
8684 void
8685 arm_displaced_step_fixup (struct gdbarch *gdbarch,
8686 struct displaced_step_closure *dsc,
8687 CORE_ADDR from, CORE_ADDR to,
8688 struct regcache *regs)
8689 {
8690 if (dsc->cleanup)
8691 dsc->cleanup (gdbarch, regs, dsc);
8692
8693 if (!dsc->wrote_to_pc)
8694 regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
8695 dsc->insn_addr + dsc->insn_size);
8696
8697 }
8698
8699 #include "bfd-in2.h"
8700 #include "libcoff.h"
8701
8702 static int
8703 gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
8704 {
8705 struct gdbarch *gdbarch = info->application_data;
8706
8707 if (arm_pc_is_thumb (gdbarch, memaddr))
8708 {
8709 static asymbol *asym;
8710 static combined_entry_type ce;
8711 static struct coff_symbol_struct csym;
8712 static struct bfd fake_bfd;
8713 static bfd_target fake_target;
8714
8715 if (csym.native == NULL)
8716 {
8717 /* Create a fake symbol vector containing a Thumb symbol.
8718 This is solely so that the code in print_insn_little_arm()
8719 and print_insn_big_arm() in opcodes/arm-dis.c will detect
8720 the presence of a Thumb symbol and switch to decoding
8721 Thumb instructions. */
8722
8723 fake_target.flavour = bfd_target_coff_flavour;
8724 fake_bfd.xvec = &fake_target;
8725 ce.u.syment.n_sclass = C_THUMBEXTFUNC;
8726 csym.native = &ce;
8727 csym.symbol.the_bfd = &fake_bfd;
8728 csym.symbol.name = "fake";
8729 asym = (asymbol *) & csym;
8730 }
8731
8732 memaddr = UNMAKE_THUMB_ADDR (memaddr);
8733 info->symbols = &asym;
8734 }
8735 else
8736 info->symbols = NULL;
8737
8738 if (info->endian == BFD_ENDIAN_BIG)
8739 return print_insn_big_arm (memaddr, info);
8740 else
8741 return print_insn_little_arm (memaddr, info);
8742 }
8743
8744 /* The following define instruction sequences that will cause ARM
8745 cpu's to take an undefined instruction trap. These are used to
8746 signal a breakpoint to GDB.
8747
8748 The newer ARMv4T cpu's are capable of operating in ARM or Thumb
8749 modes. A different instruction is required for each mode. The ARM
8750 cpu's can also be big or little endian. Thus four different
8751 instructions are needed to support all cases.
8752
8753 Note: ARMv4 defines several new instructions that will take the
8754 undefined instruction trap. ARM7TDMI is nominally ARMv4T, but does
8755 not in fact add the new instructions. The new undefined
8756 instructions in ARMv4 are all instructions that had no defined
8757 behaviour in earlier chips. There is no guarantee that they will
8758 raise an exception, but they may be treated as NOPs.  In practice, it
8759 may only be safe to rely on instructions matching:
8760
8761 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
8762 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
8763 C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x
8764
8765 Even this may only be true if the condition predicate is true. The
8766 following use a condition predicate of ALWAYS so it is always TRUE.
8767
8768 There are other ways of forcing a breakpoint. GNU/Linux, RISC iX,
8769 and NetBSD all use a software interrupt rather than an undefined
8770 instruction to force a trap. This can be handled by the
8771 abi-specific code during establishment of the gdbarch vector. */
8772
8773 #define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
8774 #define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
8775 #define THUMB_LE_BREAKPOINT {0xbe,0xbe}
8776 #define THUMB_BE_BREAKPOINT {0xbe,0xbe}
8777
8778 static const gdb_byte arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
8779 static const gdb_byte arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
8780 static const gdb_byte arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
8781 static const gdb_byte arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;
8782
8783 /* Determine the type and size of breakpoint to insert at PCPTR. Uses
8784 the program counter value to determine whether a 16-bit or 32-bit
8785 breakpoint should be used. It returns a pointer to a string of
8786 bytes that encode a breakpoint instruction, stores the length of
8787 the string to *lenptr, and adjusts the program counter (if
8788 necessary) to point to the actual memory location where the
8789 breakpoint should be inserted. */
8790
8791 static const unsigned char *
8792 arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
8793 {
8794 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
8795 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
8796
8797 if (arm_pc_is_thumb (gdbarch, *pcptr))
8798 {
8799 *pcptr = UNMAKE_THUMB_ADDR (*pcptr);
8800
8801 /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
8802 check whether we are replacing a 32-bit instruction. */
8803 if (tdep->thumb2_breakpoint != NULL)
8804 {
8805 gdb_byte buf[2];
8806 if (target_read_memory (*pcptr, buf, 2) == 0)
8807 {
8808 unsigned short inst1;
8809 inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
8810 if (thumb_insn_size (inst1) == 4)
8811 {
8812 *lenptr = tdep->thumb2_breakpoint_size;
8813 return tdep->thumb2_breakpoint;
8814 }
8815 }
8816 }
8817
8818 *lenptr = tdep->thumb_breakpoint_size;
8819 return tdep->thumb_breakpoint;
8820 }
8821 else
8822 {
8823 *lenptr = tdep->arm_breakpoint_size;
8824 return tdep->arm_breakpoint;
8825 }
8826 }
8827
8828 static void
8829 arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
8830 int *kindptr)
8831 {
8832 arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);
8833
8834 if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
8835 /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
8836 that this is not confused with a 32-bit ARM breakpoint. */
8837 *kindptr = 3;
8838 }
8839
8840 /* Extract from an array REGBUF containing the (raw) register state a
8841 function return value of type TYPE, and copy that, in virtual
8842 format, into VALBUF. */
8843
8844 static void
8845 arm_extract_return_value (struct type *type, struct regcache *regs,
8846 gdb_byte *valbuf)
8847 {
8848 struct gdbarch *gdbarch = get_regcache_arch (regs);
8849 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8850
8851 if (TYPE_CODE_FLT == TYPE_CODE (type))
8852 {
8853 switch (gdbarch_tdep (gdbarch)->fp_model)
8854 {
8855 case ARM_FLOAT_FPA:
8856 {
8857 /* The value is in register F0 in internal format. We need to
8858 extract the raw value and then convert it to the desired
8859 internal type. */
8860 bfd_byte tmpbuf[FP_REGISTER_SIZE];
8861
8862 regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
8863 convert_from_extended (floatformat_from_type (type), tmpbuf,
8864 valbuf, gdbarch_byte_order (gdbarch));
8865 }
8866 break;
8867
8868 case ARM_FLOAT_SOFT_FPA:
8869 case ARM_FLOAT_SOFT_VFP:
8870 /* ARM_FLOAT_VFP can arise if this is a variadic function so
8871 not using the VFP ABI code. */
8872 case ARM_FLOAT_VFP:
8873 regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
8874 if (TYPE_LENGTH (type) > 4)
8875 regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
8876 valbuf + INT_REGISTER_SIZE);
8877 break;
8878
8879 default:
8880 internal_error (__FILE__, __LINE__,
8881 _("arm_extract_return_value: "
8882 "Floating point model not supported"));
8883 break;
8884 }
8885 }
8886 else if (TYPE_CODE (type) == TYPE_CODE_INT
8887 || TYPE_CODE (type) == TYPE_CODE_CHAR
8888 || TYPE_CODE (type) == TYPE_CODE_BOOL
8889 || TYPE_CODE (type) == TYPE_CODE_PTR
8890 || TYPE_CODE (type) == TYPE_CODE_REF
8891 || TYPE_CODE (type) == TYPE_CODE_ENUM)
8892 {
8893 /* If the type is a plain integer, then the access is
8894 straight-forward. Otherwise we have to play around a bit
8895 more. */
8896 int len = TYPE_LENGTH (type);
8897 int regno = ARM_A1_REGNUM;
8898 ULONGEST tmp;
8899
8900 while (len > 0)
8901 {
8902 /* By using store_unsigned_integer we avoid having to do
8903 anything special for small big-endian values. */
8904 regcache_cooked_read_unsigned (regs, regno++, &tmp);
8905 store_unsigned_integer (valbuf,
8906 (len > INT_REGISTER_SIZE
8907 ? INT_REGISTER_SIZE : len),
8908 byte_order, tmp);
8909 len -= INT_REGISTER_SIZE;
8910 valbuf += INT_REGISTER_SIZE;
8911 }
8912 }
8913 else
8914 {
8915 /* For a structure or union the behaviour is as if the value had
8916 been stored to word-aligned memory and then loaded into
8917 registers with 32-bit load instruction(s). */
8918 int len = TYPE_LENGTH (type);
8919 int regno = ARM_A1_REGNUM;
8920 bfd_byte tmpbuf[INT_REGISTER_SIZE];
8921
8922 while (len > 0)
8923 {
8924 regcache_cooked_read (regs, regno++, tmpbuf);
8925 memcpy (valbuf, tmpbuf,
8926 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
8927 len -= INT_REGISTER_SIZE;
8928 valbuf += INT_REGISTER_SIZE;
8929 }
8930 }
8931 }
8932
8933
8934 /* Will a function return an aggregate type in memory or in a
8935 register? Return 0 if an aggregate type can be returned in a
8936 register, 1 if it must be returned in memory. */
8937
8938 static int
8939 arm_return_in_memory (struct gdbarch *gdbarch, struct type *type)
8940 {
8941 int nRc;
8942 enum type_code code;
8943
8944 CHECK_TYPEDEF (type);
8945
8946 /* In the ARM ABI, "integer" like aggregate types are returned in
8947 registers. For an aggregate type to be integer like, its size
8948 must be less than or equal to INT_REGISTER_SIZE and the
8949 offset of each addressable subfield must be zero. Note that bit
8950 fields are not addressable, and all addressable subfields of
8951 unions always start at offset zero.
8952
8953 This function is based on the behaviour of GCC 2.95.1.
8954 See: gcc/arm.c: arm_return_in_memory() for details.
8955
8956 Note: All versions of GCC before GCC 2.95.2 do not set up the
8957 parameters correctly for a function returning the following
8958 structure: struct { float f;}; This should be returned in memory,
8959 not a register. Richard Earnshaw sent me a patch, but I do not
8960 know of any way to detect if a function like the above has been
8961 compiled with the correct calling convention. */
8962
8963 /* All aggregate types that won't fit in a register must be returned
8964 in memory. */
8965 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE)
8966 {
8967 return 1;
8968 }
8969
8970 /* The AAPCS says all aggregates not larger than a word are returned
8971 in a register. */
8972 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS)
8973 return 0;
8974
8975 /* The only aggregate types that can be returned in a register are
8976 structs and unions. Arrays must be returned in memory. */
8977 code = TYPE_CODE (type);
8978 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code))
8979 {
8980 return 1;
8981 }
8982
8983 /* Assume all other aggregate types can be returned in a register.
8984 Run a check for structures, unions and arrays. */
8985 nRc = 0;
8986
8987 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code))
8988 {
8989 int i;
8990 /* Need to check if this struct/union is "integer" like. For
8991 this to be true, its size must be less than or equal to
8992 INT_REGISTER_SIZE and the offset of each addressable
8993 subfield must be zero. Note that bit fields are not
8994 addressable, and unions always start at offset zero. If any
8995 of the subfields is a floating point type, the struct/union
8996 cannot be an integer type. */
8997
8998 /* For each field in the object, check:
8999 1) Is it a floating point type? --> yes, nRc = 1;
9000 2) Is it at a non-zero offset (bitpos != 0) and
9001 not a bit field (bitsize == 0)?
9002 --> yes, nRc = 1
9003 */
9004
9005 for (i = 0; i < TYPE_NFIELDS (type); i++)
9006 {
9007 enum type_code field_type_code;
9008 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type,
9009 i)));
9010
9011 /* Is it a floating point type field? */
9012 if (field_type_code == TYPE_CODE_FLT)
9013 {
9014 nRc = 1;
9015 break;
9016 }
9017
9018 /* If the field is not at offset zero, we need to look at it more closely. */
9019 if (TYPE_FIELD_BITPOS (type, i) != 0)
9020 {
9021 /* Bit fields are not addressable. A field whose bitsize is
9022 zero is not a bit field or any other packed type, so a
9023 non-zero offset here means the aggregate is not integer like. */
9024 if (TYPE_FIELD_BITSIZE (type, i) == 0)
9025 {
9026 nRc = 1;
9027 break;
9028 }
9029 }
9030 }
9031 }
9032
9033 return nRc;
9034 }
9035
9036 /* Write into appropriate registers a function return value of type
9037 TYPE, given in virtual format. */
9038
9039 static void
9040 arm_store_return_value (struct type *type, struct regcache *regs,
9041 const gdb_byte *valbuf)
9042 {
9043 struct gdbarch *gdbarch = get_regcache_arch (regs);
9044 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9045
9046 if (TYPE_CODE (type) == TYPE_CODE_FLT)
9047 {
9048 gdb_byte buf[MAX_REGISTER_SIZE];
9049
9050 switch (gdbarch_tdep (gdbarch)->fp_model)
9051 {
9052 case ARM_FLOAT_FPA:
9053
9054 convert_to_extended (floatformat_from_type (type), buf, valbuf,
9055 gdbarch_byte_order (gdbarch));
9056 regcache_cooked_write (regs, ARM_F0_REGNUM, buf);
9057 break;
9058
9059 case ARM_FLOAT_SOFT_FPA:
9060 case ARM_FLOAT_SOFT_VFP:
9061 /* ARM_FLOAT_VFP can arise if this is a variadic function, which
9062 therefore does not use the VFP ABI return path. */
9063 case ARM_FLOAT_VFP:
9064 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf);
9065 if (TYPE_LENGTH (type) > 4)
9066 regcache_cooked_write (regs, ARM_A1_REGNUM + 1,
9067 valbuf + INT_REGISTER_SIZE);
9068 break;
9069
9070 default:
9071 internal_error (__FILE__, __LINE__,
9072 _("arm_store_return_value: Floating "
9073 "point model not supported"));
9074 break;
9075 }
9076 }
9077 else if (TYPE_CODE (type) == TYPE_CODE_INT
9078 || TYPE_CODE (type) == TYPE_CODE_CHAR
9079 || TYPE_CODE (type) == TYPE_CODE_BOOL
9080 || TYPE_CODE (type) == TYPE_CODE_PTR
9081 || TYPE_CODE (type) == TYPE_CODE_REF
9082 || TYPE_CODE (type) == TYPE_CODE_ENUM)
9083 {
9084 if (TYPE_LENGTH (type) <= 4)
9085 {
9086 /* Values of one word or less are zero/sign-extended and
9087 returned in r0. */
9088 bfd_byte tmpbuf[INT_REGISTER_SIZE];
9089 LONGEST val = unpack_long (type, valbuf);
9090
9091 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val);
9092 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf);
9093 }
9094 else
9095 {
9096 /* Integral values greater than one word are stored in consecutive
9097 registers starting with r0. This will always be a multiple of
9098 the register size. */
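/* For example (illustrative): a 64-bit long long return value has its
   first word written to r0 and its second to r1, which on a
   little-endian target places the low half in r0 as expected.  */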
9099 int len = TYPE_LENGTH (type);
9100 int regno = ARM_A1_REGNUM;
9101
9102 while (len > 0)
9103 {
9104 regcache_cooked_write (regs, regno++, valbuf);
9105 len -= INT_REGISTER_SIZE;
9106 valbuf += INT_REGISTER_SIZE;
9107 }
9108 }
9109 }
9110 else
9111 {
9112 /* For a structure or union the behaviour is as if the value had
9113 been stored to word-aligned memory and then loaded into
9114 registers with 32-bit load instruction(s). */
9115 int len = TYPE_LENGTH (type);
9116 int regno = ARM_A1_REGNUM;
9117 bfd_byte tmpbuf[INT_REGISTER_SIZE];
9118
9119 while (len > 0)
9120 {
9121 memcpy (tmpbuf, valbuf,
9122 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len);
9123 regcache_cooked_write (regs, regno++, tmpbuf);
9124 len -= INT_REGISTER_SIZE;
9125 valbuf += INT_REGISTER_SIZE;
9126 }
9127 }
9128 }
9129
9130
9131 /* Handle function return values. */
9132
9133 static enum return_value_convention
9134 arm_return_value (struct gdbarch *gdbarch, struct value *function,
9135 struct type *valtype, struct regcache *regcache,
9136 gdb_byte *readbuf, const gdb_byte *writebuf)
9137 {
9138 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9139 struct type *func_type = function ? value_type (function) : NULL;
9140 enum arm_vfp_cprc_base_type vfp_base_type;
9141 int vfp_base_count;
9142
9143 if (arm_vfp_abi_for_function (gdbarch, func_type)
9144 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count))
9145 {
9146 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
9147 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
9148 int i;
9149 for (i = 0; i < vfp_base_count; i++)
9150 {
9151 if (reg_char == 'q')
9152 {
9153 if (writebuf)
9154 arm_neon_quad_write (gdbarch, regcache, i,
9155 writebuf + i * unit_length);
9156
9157 if (readbuf)
9158 arm_neon_quad_read (gdbarch, regcache, i,
9159 readbuf + i * unit_length);
9160 }
9161 else
9162 {
9163 char name_buf[4];
9164 int regnum;
9165
9166 xsnprintf (name_buf, sizeof (name_buf), "%c%d", reg_char, i);
9167 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9168 strlen (name_buf));
9169 if (writebuf)
9170 regcache_cooked_write (regcache, regnum,
9171 writebuf + i * unit_length);
9172 if (readbuf)
9173 regcache_cooked_read (regcache, regnum,
9174 readbuf + i * unit_length);
9175 }
9176 }
9177 return RETURN_VALUE_REGISTER_CONVENTION;
9178 }
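/* For example (illustrative): under the VFP ABI a homogeneous aggregate
   such as struct { float x, y, z, w; } has base type 's' and a count of
   four, so the loop above transfers it through s0-s3; a struct of two
   doubles would use d0-d1 instead.  */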
9179
9180 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
9181 || TYPE_CODE (valtype) == TYPE_CODE_UNION
9182 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
9183 {
9184 if (tdep->struct_return == pcc_struct_return
9185 || arm_return_in_memory (gdbarch, valtype))
9186 return RETURN_VALUE_STRUCT_CONVENTION;
9187 }
9188
9189 /* AAPCS returns complex types longer than a register in memory. */
9190 if (tdep->arm_abi != ARM_ABI_APCS
9191 && TYPE_CODE (valtype) == TYPE_CODE_COMPLEX
9192 && TYPE_LENGTH (valtype) > INT_REGISTER_SIZE)
9193 return RETURN_VALUE_STRUCT_CONVENTION;
9194
9195 if (writebuf)
9196 arm_store_return_value (valtype, regcache, writebuf);
9197
9198 if (readbuf)
9199 arm_extract_return_value (valtype, regcache, readbuf);
9200
9201 return RETURN_VALUE_REGISTER_CONVENTION;
9202 }
9203
9204
9205 static int
9206 arm_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
9207 {
9208 struct gdbarch *gdbarch = get_frame_arch (frame);
9209 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
9210 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
9211 CORE_ADDR jb_addr;
9212 gdb_byte buf[INT_REGISTER_SIZE];
9213
9214 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM);
9215
9216 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
9217 INT_REGISTER_SIZE))
9218 return 0;
9219
9220 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order);
9221 return 1;
9222 }
9223
9224 /* Recognize GCC and GNU ld's trampolines. If we are in a trampoline,
9225 return the target PC. Otherwise return 0. */
9226
9227 CORE_ADDR
9228 arm_skip_stub (struct frame_info *frame, CORE_ADDR pc)
9229 {
9230 const char *name;
9231 int namelen;
9232 CORE_ADDR start_addr;
9233
9234 /* Find the starting address and name of the function containing the PC. */
9235 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0)
9236 return 0;
9237
9238 /* If PC is in a Thumb call or return stub, return the address of the
9239 target PC, which is in a register. The thunk functions are called
9240 _call_via_xx, where xx is the register name. The possible names
9241 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar
9242 functions, named __ARM_call_via_r[0-7]. */
9243 if (strncmp (name, "_call_via_", 10) == 0
9244 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0)
9245 {
9246 /* Use the name suffix to determine which register contains the
9247 target PC. */
9248 static char *table[15] =
9249 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
9250 "r8", "r9", "sl", "fp", "ip", "sp", "lr"
9251 };
9252 int regno;
9253 int offset = strlen (name) - 2;
9254
9255 for (regno = 0; regno <= 14; regno++)
9256 if (strcmp (&name[offset], table[regno]) == 0)
9257 return get_frame_register_unsigned (frame, regno);
9258 }
9259
9260 /* GNU ld generates __foo_from_arm or __foo_from_thumb for
9261 non-interworking calls to foo. We could decode the stubs
9262 to find the target but it's easier to use the symbol table. */
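/* For example (illustrative): a Thumb-mode call that reaches an
   ARM-mode function `foo' may be routed through a linker stub named
   `__foo_from_thumb'.  The code below strips the leading "__" and the
   "_from_thumb" (or "_from_arm") suffix to recover `foo', then looks
   that symbol up.  */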
9263 namelen = strlen (name);
9264 if (name[0] == '_' && name[1] == '_'
9265 && ((namelen > 2 + strlen ("_from_thumb")
9266 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb",
9267 strlen ("_from_thumb")) == 0)
9268 || (namelen > 2 + strlen ("_from_arm")
9269 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm",
9270 strlen ("_from_arm")) == 0)))
9271 {
9272 char *target_name;
9273 int target_len = namelen - 2;
9274 struct minimal_symbol *minsym;
9275 struct objfile *objfile;
9276 struct obj_section *sec;
9277
9278 if (name[namelen - 1] == 'b')
9279 target_len -= strlen ("_from_thumb");
9280 else
9281 target_len -= strlen ("_from_arm");
9282
9283 target_name = alloca (target_len + 1);
9284 memcpy (target_name, name + 2, target_len);
9285 target_name[target_len] = '\0';
9286
9287 sec = find_pc_section (pc);
9288 objfile = (sec == NULL) ? NULL : sec->objfile;
9289 minsym = lookup_minimal_symbol (target_name, NULL, objfile);
9290 if (minsym != NULL)
9291 return SYMBOL_VALUE_ADDRESS (minsym);
9292 else
9293 return 0;
9294 }
9295
9296 return 0; /* not a stub */
9297 }
9298
9299 static void
9300 set_arm_command (char *args, int from_tty)
9301 {
9302 printf_unfiltered (_("\
9303 \"set arm\" must be followed by an apporpriate subcommand.\n"));
9304 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout);
9305 }
9306
9307 static void
9308 show_arm_command (char *args, int from_tty)
9309 {
9310 cmd_show_list (showarmcmdlist, from_tty, "");
9311 }
9312
9313 static void
9314 arm_update_current_architecture (void)
9315 {
9316 struct gdbarch_info info;
9317
9318 /* If the current architecture is not ARM, we have nothing to do. */
9319 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_arm)
9320 return;
9321
9322 /* Update the architecture. */
9323 gdbarch_info_init (&info);
9324
9325 if (!gdbarch_update_p (info))
9326 internal_error (__FILE__, __LINE__, _("could not update architecture"));
9327 }
9328
9329 static void
9330 set_fp_model_sfunc (char *args, int from_tty,
9331 struct cmd_list_element *c)
9332 {
9333 enum arm_float_model fp_model;
9334
9335 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++)
9336 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0)
9337 {
9338 arm_fp_model = fp_model;
9339 break;
9340 }
9341
9342 if (fp_model == ARM_FLOAT_LAST)
9343 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."),
9344 current_fp_model);
9345
9346 arm_update_current_architecture ();
9347 }
9348
9349 static void
9350 show_fp_model (struct ui_file *file, int from_tty,
9351 struct cmd_list_element *c, const char *value)
9352 {
9353 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9354
9355 if (arm_fp_model == ARM_FLOAT_AUTO
9356 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9357 fprintf_filtered (file, _("\
9358 The current ARM floating point model is \"auto\" (currently \"%s\").\n"),
9359 fp_model_strings[tdep->fp_model]);
9360 else
9361 fprintf_filtered (file, _("\
9362 The current ARM floating point model is \"%s\".\n"),
9363 fp_model_strings[arm_fp_model]);
9364 }
9365
9366 static void
9367 arm_set_abi (char *args, int from_tty,
9368 struct cmd_list_element *c)
9369 {
9370 enum arm_abi_kind arm_abi;
9371
9372 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++)
9373 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0)
9374 {
9375 arm_abi_global = arm_abi;
9376 break;
9377 }
9378
9379 if (arm_abi == ARM_ABI_LAST)
9380 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."),
9381 arm_abi_string);
9382
9383 arm_update_current_architecture ();
9384 }
9385
9386 static void
9387 arm_show_abi (struct ui_file *file, int from_tty,
9388 struct cmd_list_element *c, const char *value)
9389 {
9390 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9391
9392 if (arm_abi_global == ARM_ABI_AUTO
9393 && gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_arm)
9394 fprintf_filtered (file, _("\
9395 The current ARM ABI is \"auto\" (currently \"%s\").\n"),
9396 arm_abi_strings[tdep->arm_abi]);
9397 else
9398 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"),
9399 arm_abi_string);
9400 }
9401
9402 static void
9403 arm_show_fallback_mode (struct ui_file *file, int from_tty,
9404 struct cmd_list_element *c, const char *value)
9405 {
9406 fprintf_filtered (file,
9407 _("The current execution mode assumed "
9408 "(when symbols are unavailable) is \"%s\".\n"),
9409 arm_fallback_mode_string);
9410 }
9411
9412 static void
9413 arm_show_force_mode (struct ui_file *file, int from_tty,
9414 struct cmd_list_element *c, const char *value)
9415 {
9416 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch ());
9417
9418 fprintf_filtered (file,
9419 _("The current execution mode assumed "
9420 "(even when symbols are available) is \"%s\".\n"),
9421 arm_force_mode_string);
9422 }
9423
9424 /* If the user changes the register disassembly style used for info
9425 register and other commands, we have to also switch the style used
9426 in opcodes for disassembly output. This function is run by the \"set
9427 arm disassembler\" command, and performs that switch. */
9428
9429 static void
9430 set_disassembly_style_sfunc (char *args, int from_tty,
9431 struct cmd_list_element *c)
9432 {
9433 set_disassembly_style ();
9434 }
9435 \f
9436 /* Return the ARM register name corresponding to register I. */
9437 static const char *
9438 arm_register_name (struct gdbarch *gdbarch, int i)
9439 {
9440 const int num_regs = gdbarch_num_regs (gdbarch);
9441
9442 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
9443 && i >= num_regs && i < num_regs + 32)
9444 {
9445 static const char *const vfp_pseudo_names[] = {
9446 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
9447 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
9448 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
9449 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
9450 };
9451
9452 return vfp_pseudo_names[i - num_regs];
9453 }
9454
9455 if (gdbarch_tdep (gdbarch)->have_neon_pseudos
9456 && i >= num_regs + 32 && i < num_regs + 32 + 16)
9457 {
9458 static const char *const neon_pseudo_names[] = {
9459 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
9460 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
9461 };
9462
9463 return neon_pseudo_names[i - num_regs - 32];
9464 }
9465
9466 if (i >= ARRAY_SIZE (arm_register_names))
9467 /* These registers are only supported on targets which supply
9468 an XML description. */
9469 return "";
9470
9471 return arm_register_names[i];
9472 }
9473
9474 static void
9475 set_disassembly_style (void)
9476 {
9477 int current;
9478
9479 /* Find the style that the user wants. */
9480 for (current = 0; current < num_disassembly_options; current++)
9481 if (disassembly_style == valid_disassembly_styles[current])
9482 break;
9483 gdb_assert (current < num_disassembly_options);
9484
9485 /* Synchronize the disassembler. */
9486 set_arm_regname_option (current);
9487 }
9488
9489 /* Test whether the coff symbol specific value corresponds to a Thumb
9490 function. */
9491
9492 static int
9493 coff_sym_is_thumb (int val)
9494 {
9495 return (val == C_THUMBEXT
9496 || val == C_THUMBSTAT
9497 || val == C_THUMBEXTFUNC
9498 || val == C_THUMBSTATFUNC
9499 || val == C_THUMBLABEL);
9500 }
9501
9502 /* arm_coff_make_msymbol_special()
9503 arm_elf_make_msymbol_special()
9504
9505 These functions test whether the COFF or ELF symbol corresponds to
9506 an address in thumb code, and set a "special" bit in a minimal
9507 symbol to indicate that it does. */
9508
9509 static void
9510 arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym)
9511 {
9512 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym)
9513 == ST_BRANCH_TO_THUMB)
9514 MSYMBOL_SET_SPECIAL (msym);
9515 }
9516
9517 static void
9518 arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym)
9519 {
9520 if (coff_sym_is_thumb (val))
9521 MSYMBOL_SET_SPECIAL (msym);
9522 }
9523
9524 static void
9525 arm_objfile_data_free (struct objfile *objfile, void *arg)
9526 {
9527 struct arm_per_objfile *data = arg;
9528 unsigned int i;
9529
9530 for (i = 0; i < objfile->obfd->section_count; i++)
9531 VEC_free (arm_mapping_symbol_s, data->section_maps[i]);
9532 }
9533
9534 static void
9535 arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile,
9536 asymbol *sym)
9537 {
9538 const char *name = bfd_asymbol_name (sym);
9539 struct arm_per_objfile *data;
9540 VEC(arm_mapping_symbol_s) **map_p;
9541 struct arm_mapping_symbol new_map_sym;
9542
9543 gdb_assert (name[0] == '$');
9544 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd')
9545 return;
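/* Illustrative note: a mapping symbol such as `$t' with value 0x40 in
   .text records that code from that section offset onwards is Thumb;
   `$a' marks ARM code and `$d' marks literal data.  The sorted
   per-section vectors built below are consulted later when deciding
   whether an address holds ARM or Thumb code.  */
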
9546
9547 data = objfile_data (objfile, arm_objfile_data_key);
9548 if (data == NULL)
9549 {
9550 data = OBSTACK_ZALLOC (&objfile->objfile_obstack,
9551 struct arm_per_objfile);
9552 set_objfile_data (objfile, arm_objfile_data_key, data);
9553 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
9554 objfile->obfd->section_count,
9555 VEC(arm_mapping_symbol_s) *);
9556 }
9557 map_p = &data->section_maps[bfd_get_section (sym)->index];
9558
9559 new_map_sym.value = sym->value;
9560 new_map_sym.type = name[1];
9561
9562 /* Assume that most mapping symbols appear in order of increasing
9563 value. If they were randomly distributed, it would be faster to
9564 always push here and then sort at first use. */
9565 if (!VEC_empty (arm_mapping_symbol_s, *map_p))
9566 {
9567 struct arm_mapping_symbol *prev_map_sym;
9568
9569 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p);
9570 if (prev_map_sym->value >= sym->value)
9571 {
9572 unsigned int idx;
9573 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym,
9574 arm_compare_mapping_symbols);
9575 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym);
9576 return;
9577 }
9578 }
9579
9580 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym);
9581 }
9582
9583 static void
9584 arm_write_pc (struct regcache *regcache, CORE_ADDR pc)
9585 {
9586 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9587 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc);
9588
9589 /* If necessary, set the T bit. */
9590 if (arm_apcs_32)
9591 {
9592 ULONGEST val, t_bit;
9593 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val);
9594 t_bit = arm_psr_thumb_bit (gdbarch);
9595 if (arm_pc_is_thumb (gdbarch, pc))
9596 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9597 val | t_bit);
9598 else
9599 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM,
9600 val & ~t_bit);
9601 }
9602 }
9603
9604 /* Read the contents of a NEON quad register, by reading from two
9605 double registers. This is used to implement the quad pseudo
9606 registers, and for argument passing in case the quad registers are
9607 missing; vectors are passed in quad registers when using the VFP
9608 ABI, even if a NEON unit is not present. REGNUM is the index of
9609 the quad register, in [0, 15]. */
9610
9611 static enum register_status
9612 arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache,
9613 int regnum, gdb_byte *buf)
9614 {
9615 char name_buf[4];
9616 gdb_byte reg_buf[8];
9617 int offset, double_regnum;
9618 enum register_status status;
9619
9620 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9621 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9622 strlen (name_buf));
9623
9624 /* d0 is always the least significant half of q0. */
9625 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9626 offset = 8;
9627 else
9628 offset = 0;
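/* For example (illustrative): q1 is composed of d2 (least significant
   half) and d3, so on a big-endian target d2's bytes are copied to
   buf + 8 and d3's to buf + 0; on little-endian the offsets are
   reversed.  */
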
9629
9630 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9631 if (status != REG_VALID)
9632 return status;
9633 memcpy (buf + offset, reg_buf, 8);
9634
9635 offset = 8 - offset;
9636 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf);
9637 if (status != REG_VALID)
9638 return status;
9639 memcpy (buf + offset, reg_buf, 8);
9640
9641 return REG_VALID;
9642 }
9643
9644 static enum register_status
9645 arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache,
9646 int regnum, gdb_byte *buf)
9647 {
9648 const int num_regs = gdbarch_num_regs (gdbarch);
9649 char name_buf[4];
9650 gdb_byte reg_buf[8];
9651 int offset, double_regnum;
9652
9653 gdb_assert (regnum >= num_regs);
9654 regnum -= num_regs;
9655
9656 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9657 /* Quad-precision register. */
9658 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf);
9659 else
9660 {
9661 enum register_status status;
9662
9663 /* Single-precision register. */
9664 gdb_assert (regnum < 32);
9665
9666 /* s0 is always the least significant half of d0. */
9667 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9668 offset = (regnum & 1) ? 0 : 4;
9669 else
9670 offset = (regnum & 1) ? 4 : 0;
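/* For example (illustrative): s5 is the odd half of d2, so on a
   little-endian target it is taken from offset 4 of d2, while s4
   comes from offset 0.  */
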
9671
9672 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9673 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9674 strlen (name_buf));
9675
9676 status = regcache_raw_read (regcache, double_regnum, reg_buf);
9677 if (status == REG_VALID)
9678 memcpy (buf, reg_buf + offset, 4);
9679 return status;
9680 }
9681 }
9682
9683 /* Store the contents of BUF to a NEON quad register, by writing to
9684 two double registers. This is used to implement the quad pseudo
9685 registers, and for argument passing in case the quad registers are
9686 missing; vectors are passed in quad registers when using the VFP
9687 ABI, even if a NEON unit is not present. REGNUM is the index
9688 of the quad register, in [0, 15]. */
9689
9690 static void
9691 arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache,
9692 int regnum, const gdb_byte *buf)
9693 {
9694 char name_buf[4];
9695 int offset, double_regnum;
9696
9697 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum << 1);
9698 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9699 strlen (name_buf));
9700
9701 /* d0 is always the least significant half of q0. */
9702 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9703 offset = 8;
9704 else
9705 offset = 0;
9706
9707 regcache_raw_write (regcache, double_regnum, buf + offset);
9708 offset = 8 - offset;
9709 regcache_raw_write (regcache, double_regnum + 1, buf + offset);
9710 }
9711
9712 static void
9713 arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
9714 int regnum, const gdb_byte *buf)
9715 {
9716 const int num_regs = gdbarch_num_regs (gdbarch);
9717 char name_buf[4];
9718 gdb_byte reg_buf[8];
9719 int offset, double_regnum;
9720
9721 gdb_assert (regnum >= num_regs);
9722 regnum -= num_regs;
9723
9724 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48)
9725 /* Quad-precision register. */
9726 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf);
9727 else
9728 {
9729 /* Single-precision register. */
9730 gdb_assert (regnum < 32);
9731
9732 /* s0 is always the least significant half of d0. */
9733 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG)
9734 offset = (regnum & 1) ? 0 : 4;
9735 else
9736 offset = (regnum & 1) ? 4 : 0;
9737
9738 xsnprintf (name_buf, sizeof (name_buf), "d%d", regnum >> 1);
9739 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
9740 strlen (name_buf));
9741
9742 regcache_raw_read (regcache, double_regnum, reg_buf);
9743 memcpy (reg_buf + offset, buf, 4);
9744 regcache_raw_write (regcache, double_regnum, reg_buf);
9745 }
9746 }
9747
9748 static struct value *
9749 value_of_arm_user_reg (struct frame_info *frame, const void *baton)
9750 {
9751 const int *reg_p = baton;
9752 return value_of_register (*reg_p, frame);
9753 }
9754 \f
9755 static enum gdb_osabi
9756 arm_elf_osabi_sniffer (bfd *abfd)
9757 {
9758 unsigned int elfosabi;
9759 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN;
9760
9761 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI];
9762
9763 if (elfosabi == ELFOSABI_ARM)
9764 /* GNU tools use this value. Check note sections in this case,
9765 as well. */
9766 bfd_map_over_sections (abfd,
9767 generic_elf_osabi_sniff_abi_tag_sections,
9768 &osabi);
9769
9770 /* Anything else will be handled by the generic ELF sniffer. */
9771 return osabi;
9772 }
9773
9774 static int
9775 arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
9776 struct reggroup *group)
9777 {
9778 /* The FPS register's type is INT, but it belongs to float_reggroup. Besides
9779 this, the FPS register belongs to save_reggroup, restore_reggroup, and
9780 all_reggroup, of course. */
9781 if (regnum == ARM_FPS_REGNUM)
9782 return (group == float_reggroup
9783 || group == save_reggroup
9784 || group == restore_reggroup
9785 || group == all_reggroup);
9786 else
9787 return default_register_reggroup_p (gdbarch, regnum, group);
9788 }
9789
9790 \f
9791 /* For backward-compatibility we allow two 'g' packet lengths with
9792 the remote protocol depending on whether FPA registers are
9793 supplied. M-profile targets do not have FPA registers, but some
9794 stubs already exist in the wild which use a 'g' packet which
9795 supplies them albeit with dummy values. The packet format which
9796 includes FPA registers should be considered deprecated for
9797 M-profile targets. */
9798
9799 static void
9800 arm_register_g_packet_guesses (struct gdbarch *gdbarch)
9801 {
9802 if (gdbarch_tdep (gdbarch)->is_m)
9803 {
9804 /* If we know from the executable this is an M-profile target,
9805 cater for remote targets whose register set layout is the
9806 same as the FPA layout. */
9807 register_remote_g_packet_guess (gdbarch,
9808 /* r0-r12,sp,lr,pc; f0-f7; fps,xpsr */
9809 (16 * INT_REGISTER_SIZE)
9810 + (8 * FP_REGISTER_SIZE)
9811 + (2 * INT_REGISTER_SIZE),
9812 tdesc_arm_with_m_fpa_layout);
9813
9814 /* The regular M-profile layout. */
9815 register_remote_g_packet_guess (gdbarch,
9816 /* r0-r12,sp,lr,pc; xpsr */
9817 (16 * INT_REGISTER_SIZE)
9818 + INT_REGISTER_SIZE,
9819 tdesc_arm_with_m);
9820
9821 /* M-profile plus M4F VFP. */
9822 register_remote_g_packet_guess (gdbarch,
9823 /* r0-r12,sp,lr,pc; d0-d15; fpscr,xpsr */
9824 (16 * INT_REGISTER_SIZE)
9825 + (16 * VFP_REGISTER_SIZE)
9826 + (2 * INT_REGISTER_SIZE),
9827 tdesc_arm_with_m_vfp_d16);
9828 }
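/* Assuming the usual register sizes (4-byte core registers, 12-byte
   FPA registers, 8-byte VFP double registers), the three guesses above
   correspond to 'g' packets carrying 168, 68 and 200 bytes of register
   data respectively.  */
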
9829
9830 /* Otherwise we don't have a useful guess. */
9831 }
9832
9833 \f
9834 /* Initialize the current architecture based on INFO. If possible,
9835 re-use an architecture from ARCHES, which is a list of
9836 architectures already created during this debugging session.
9837
9838 Called e.g. at program startup, when reading a core file, and when
9839 reading a binary file. */
9840
9841 static struct gdbarch *
9842 arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
9843 {
9844 struct gdbarch_tdep *tdep;
9845 struct gdbarch *gdbarch;
9846 struct gdbarch_list *best_arch;
9847 enum arm_abi_kind arm_abi = arm_abi_global;
9848 enum arm_float_model fp_model = arm_fp_model;
9849 struct tdesc_arch_data *tdesc_data = NULL;
9850 int i, is_m = 0;
9851 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0;
9852 int have_neon = 0;
9853 int have_fpa_registers = 1;
9854 const struct target_desc *tdesc = info.target_desc;
9855
9856 /* If we have an object to base this architecture on, try to determine
9857 its ABI. */
9858
9859 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL)
9860 {
9861 int ei_osabi, e_flags;
9862
9863 switch (bfd_get_flavour (info.abfd))
9864 {
9865 case bfd_target_aout_flavour:
9866 /* Assume it's an old APCS-style ABI. */
9867 arm_abi = ARM_ABI_APCS;
9868 break;
9869
9870 case bfd_target_coff_flavour:
9871 /* Assume it's an old APCS-style ABI. */
9872 /* XXX WinCE? */
9873 arm_abi = ARM_ABI_APCS;
9874 break;
9875
9876 case bfd_target_elf_flavour:
9877 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI];
9878 e_flags = elf_elfheader (info.abfd)->e_flags;
9879
9880 if (ei_osabi == ELFOSABI_ARM)
9881 {
9882 /* GNU tools used to use this value, but do not for EABI
9883 objects. There's nowhere to tag an EABI version
9884 anyway, so assume APCS. */
9885 arm_abi = ARM_ABI_APCS;
9886 }
9887 else if (ei_osabi == ELFOSABI_NONE)
9888 {
9889 int eabi_ver = EF_ARM_EABI_VERSION (e_flags);
9890 int attr_arch, attr_profile;
9891
9892 switch (eabi_ver)
9893 {
9894 case EF_ARM_EABI_UNKNOWN:
9895 /* Assume GNU tools. */
9896 arm_abi = ARM_ABI_APCS;
9897 break;
9898
9899 case EF_ARM_EABI_VER4:
9900 case EF_ARM_EABI_VER5:
9901 arm_abi = ARM_ABI_AAPCS;
9902 /* EABI binaries default to VFP float ordering.
9903 They may also contain build attributes that can
9904 be used to identify if the VFP argument-passing
9905 ABI is in use. */
9906 if (fp_model == ARM_FLOAT_AUTO)
9907 {
9908 #ifdef HAVE_ELF
9909 switch (bfd_elf_get_obj_attr_int (info.abfd,
9910 OBJ_ATTR_PROC,
9911 Tag_ABI_VFP_args))
9912 {
9913 case 0:
9914 /* "The user intended FP parameter/result
9915 passing to conform to AAPCS, base
9916 variant". */
9917 fp_model = ARM_FLOAT_SOFT_VFP;
9918 break;
9919 case 1:
9920 /* "The user intended FP parameter/result
9921 passing to conform to AAPCS, VFP
9922 variant". */
9923 fp_model = ARM_FLOAT_VFP;
9924 break;
9925 case 2:
9926 /* "The user intended FP parameter/result
9927 passing to conform to tool chain-specific
9928 conventions" - we don't know any such
9929 conventions, so leave it as "auto". */
9930 break;
9931 default:
9932 /* Attribute value not mentioned in the
9933 October 2008 ABI, so leave it as
9934 "auto". */
9935 break;
9936 }
9937 #else
9938 fp_model = ARM_FLOAT_SOFT_VFP;
9939 #endif
9940 }
9941 break;
9942
9943 default:
9944 /* Leave it as "auto". */
9945 warning (_("unknown ARM EABI version 0x%x"), eabi_ver);
9946 break;
9947 }
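/* For reference (not derived from this file): GCC normally sets
   Tag_ABI_VFP_args to 1 when compiling with -mfloat-abi=hard and
   leaves it at 0 (or absent) for -mfloat-abi=soft and =softfp,
   which is what the Tag_ABI_VFP_args check above distinguishes.  */
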
9948
9949 #ifdef HAVE_ELF
9950 /* Detect M-profile programs. This only works if the
9951 executable file includes build attributes; GCC does
9952 copy them to the executable, but e.g. RealView does
9953 not. */
9954 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC,
9955 Tag_CPU_arch);
9956 attr_profile = bfd_elf_get_obj_attr_int (info.abfd,
9957 OBJ_ATTR_PROC,
9958 Tag_CPU_arch_profile);
9959 /* GCC specifies the profile for v6-M; RealView only
9960 specifies the profile for architectures starting with
9961 V7 (as opposed to architectures with a tag
9962 numerically greater than TAG_CPU_ARCH_V7). */
9963 if (!tdesc_has_registers (tdesc)
9964 && (attr_arch == TAG_CPU_ARCH_V6_M
9965 || attr_arch == TAG_CPU_ARCH_V6S_M
9966 || attr_profile == 'M'))
9967 is_m = 1;
9968 #endif
9969 }
9970
9971 if (fp_model == ARM_FLOAT_AUTO)
9972 {
9973 int e_flags = elf_elfheader (info.abfd)->e_flags;
9974
9975 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT))
9976 {
9977 case 0:
9978 /* Leave it as "auto". Strictly speaking this case
9979 means FPA, but almost nobody uses that now, and
9980 many toolchains fail to set the appropriate bits
9981 for the floating-point model they use. */
9982 break;
9983 case EF_ARM_SOFT_FLOAT:
9984 fp_model = ARM_FLOAT_SOFT_FPA;
9985 break;
9986 case EF_ARM_VFP_FLOAT:
9987 fp_model = ARM_FLOAT_VFP;
9988 break;
9989 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT:
9990 fp_model = ARM_FLOAT_SOFT_VFP;
9991 break;
9992 }
9993 }
9994
9995 if (e_flags & EF_ARM_BE8)
9996 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
9997
9998 break;
9999
10000 default:
10001 /* Leave it as "auto". */
10002 break;
10003 }
10004 }
10005
10006 /* Check any target description for validity. */
10007 if (tdesc_has_registers (tdesc))
10008 {
10009 /* For most registers we require GDB's default names; but also allow
10010 the numeric names for sp / lr / pc, as a convenience. */
10011 static const char *const arm_sp_names[] = { "r13", "sp", NULL };
10012 static const char *const arm_lr_names[] = { "r14", "lr", NULL };
10013 static const char *const arm_pc_names[] = { "r15", "pc", NULL };
10014
10015 const struct tdesc_feature *feature;
10016 int valid_p;
10017
10018 feature = tdesc_find_feature (tdesc,
10019 "org.gnu.gdb.arm.core");
10020 if (feature == NULL)
10021 {
10022 feature = tdesc_find_feature (tdesc,
10023 "org.gnu.gdb.arm.m-profile");
10024 if (feature == NULL)
10025 return NULL;
10026 else
10027 is_m = 1;
10028 }
10029
10030 tdesc_data = tdesc_data_alloc ();
10031
10032 valid_p = 1;
10033 for (i = 0; i < ARM_SP_REGNUM; i++)
10034 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10035 arm_register_names[i]);
10036 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10037 ARM_SP_REGNUM,
10038 arm_sp_names);
10039 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10040 ARM_LR_REGNUM,
10041 arm_lr_names);
10042 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data,
10043 ARM_PC_REGNUM,
10044 arm_pc_names);
10045 if (is_m)
10046 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10047 ARM_PS_REGNUM, "xpsr");
10048 else
10049 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10050 ARM_PS_REGNUM, "cpsr");
10051
10052 if (!valid_p)
10053 {
10054 tdesc_data_cleanup (tdesc_data);
10055 return NULL;
10056 }
10057
10058 feature = tdesc_find_feature (tdesc,
10059 "org.gnu.gdb.arm.fpa");
10060 if (feature != NULL)
10061 {
10062 valid_p = 1;
10063 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++)
10064 valid_p &= tdesc_numbered_register (feature, tdesc_data, i,
10065 arm_register_names[i]);
10066 if (!valid_p)
10067 {
10068 tdesc_data_cleanup (tdesc_data);
10069 return NULL;
10070 }
10071 }
10072 else
10073 have_fpa_registers = 0;
10074
10075 feature = tdesc_find_feature (tdesc,
10076 "org.gnu.gdb.xscale.iwmmxt");
10077 if (feature != NULL)
10078 {
10079 static const char *const iwmmxt_names[] = {
10080 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7",
10081 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15",
10082 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "",
10083 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "",
10084 };
10085
10086 valid_p = 1;
10087 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++)
10088 valid_p
10089 &= tdesc_numbered_register (feature, tdesc_data, i,
10090 iwmmxt_names[i - ARM_WR0_REGNUM]);
10091
10092 /* Check for the control registers, but do not fail if they
10093 are missing. */
10094 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++)
10095 tdesc_numbered_register (feature, tdesc_data, i,
10096 iwmmxt_names[i - ARM_WR0_REGNUM]);
10097
10098 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++)
10099 valid_p
10100 &= tdesc_numbered_register (feature, tdesc_data, i,
10101 iwmmxt_names[i - ARM_WR0_REGNUM]);
10102
10103 if (!valid_p)
10104 {
10105 tdesc_data_cleanup (tdesc_data);
10106 return NULL;
10107 }
10108 }
10109
10110 /* If we have a VFP unit, check whether the single precision registers
10111 are present. If not, then we will synthesize them as pseudo
10112 registers. */
10113 feature = tdesc_find_feature (tdesc,
10114 "org.gnu.gdb.arm.vfp");
10115 if (feature != NULL)
10116 {
10117 static const char *const vfp_double_names[] = {
10118 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
10119 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
10120 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
10121 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
10122 };
10123
10124 /* Require the double precision registers. There must be either
10125 16 or 32. */
10126 valid_p = 1;
10127 for (i = 0; i < 32; i++)
10128 {
10129 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10130 ARM_D0_REGNUM + i,
10131 vfp_double_names[i]);
10132 if (!valid_p)
10133 break;
10134 }
10135 if (!valid_p && i == 16)
10136 valid_p = 1;
10137
10138 /* Also require FPSCR. */
10139 valid_p &= tdesc_numbered_register (feature, tdesc_data,
10140 ARM_FPSCR_REGNUM, "fpscr");
10141 if (!valid_p)
10142 {
10143 tdesc_data_cleanup (tdesc_data);
10144 return NULL;
10145 }
10146
10147 if (tdesc_unnumbered_register (feature, "s0") == 0)
10148 have_vfp_pseudos = 1;
10149
10150 have_vfp_registers = 1;
10151
10152 /* If we have VFP, also check for NEON. The architecture allows
10153 NEON without VFP (integer vector operations only), but GDB
10154 does not support that. */
10155 feature = tdesc_find_feature (tdesc,
10156 "org.gnu.gdb.arm.neon");
10157 if (feature != NULL)
10158 {
10159 /* NEON requires 32 double-precision registers. */
10160 if (i != 32)
10161 {
10162 tdesc_data_cleanup (tdesc_data);
10163 return NULL;
10164 }
10165
10166 /* If there are quad registers defined by the stub, use
10167 their type; otherwise (normally) provide them with
10168 the default type. */
10169 if (tdesc_unnumbered_register (feature, "q0") == 0)
10170 have_neon_pseudos = 1;
10171
10172 have_neon = 1;
10173 }
10174 }
10175 }
10176
10177 /* If there is already a candidate, use it. */
10178 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
10179 best_arch != NULL;
10180 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
10181 {
10182 if (arm_abi != ARM_ABI_AUTO
10183 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi)
10184 continue;
10185
10186 if (fp_model != ARM_FLOAT_AUTO
10187 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model)
10188 continue;
10189
10190 /* There are various other properties in tdep that we do not
10191 need to check here: those derived from a target description,
10192 since gdbarches with a different target description are
10193 automatically disqualified. */
10194
10195 /* Do check is_m, though, since it might come from the binary. */
10196 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m)
10197 continue;
10198
10199 /* Found a match. */
10200 break;
10201 }
10202
10203 if (best_arch != NULL)
10204 {
10205 if (tdesc_data != NULL)
10206 tdesc_data_cleanup (tdesc_data);
10207 return best_arch->gdbarch;
10208 }
10209
10210 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
10211 gdbarch = gdbarch_alloc (&info, tdep);
10212
10213 /* Record additional information about the architecture we are defining.
10214 These are gdbarch discriminators, like the OSABI. */
10215 tdep->arm_abi = arm_abi;
10216 tdep->fp_model = fp_model;
10217 tdep->is_m = is_m;
10218 tdep->have_fpa_registers = have_fpa_registers;
10219 tdep->have_vfp_registers = have_vfp_registers;
10220 tdep->have_vfp_pseudos = have_vfp_pseudos;
10221 tdep->have_neon_pseudos = have_neon_pseudos;
10222 tdep->have_neon = have_neon;
10223
10224 arm_register_g_packet_guesses (gdbarch);
10225
10226 /* Breakpoints. */
10227 switch (info.byte_order_for_code)
10228 {
10229 case BFD_ENDIAN_BIG:
10230 tdep->arm_breakpoint = arm_default_arm_be_breakpoint;
10231 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint);
10232 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint;
10233 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint);
10234
10235 break;
10236
10237 case BFD_ENDIAN_LITTLE:
10238 tdep->arm_breakpoint = arm_default_arm_le_breakpoint;
10239 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint);
10240 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint;
10241 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint);
10242
10243 break;
10244
10245 default:
10246 internal_error (__FILE__, __LINE__,
10247 _("arm_gdbarch_init: bad byte order for float format"));
10248 }
10249
10250 /* On ARM targets char defaults to unsigned. */
10251 set_gdbarch_char_signed (gdbarch, 0);
10252
10253 /* Note: for displaced stepping, this includes the breakpoint, and one word
10254 of additional scratch space. This setting isn't used for anything besides
10255 displaced stepping at present. */
10256 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
10257
10258 /* This should be low enough for everything. */
10259 tdep->lowest_pc = 0x20;
10260 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
10261
10262 /* The default, for both APCS and AAPCS, is to return small
10263 structures in registers. */
10264 tdep->struct_return = reg_struct_return;
10265
10266 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call);
10267 set_gdbarch_frame_align (gdbarch, arm_frame_align);
10268
10269 set_gdbarch_write_pc (gdbarch, arm_write_pc);
10270
10271 /* Frame handling. */
10272 set_gdbarch_dummy_id (gdbarch, arm_dummy_id);
10273 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc);
10274 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp);
10275
10276 frame_base_set_default (gdbarch, &arm_normal_base);
10277
10278 /* Address manipulation. */
10279 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove);
10280
10281 /* Advance PC across function entry code. */
10282 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue);
10283
10284 /* Detect whether PC is in function epilogue. */
10285 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p);
10286
10287 /* Skip trampolines. */
10288 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub);
10289
10290 /* The stack grows downward. */
10291 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
10292
10293 /* Breakpoint manipulation. */
10294 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc);
10295 set_gdbarch_remote_breakpoint_from_pc (gdbarch,
10296 arm_remote_breakpoint_from_pc);
10297
10298 /* Information about registers, etc. */
10299 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM);
10300 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM);
10301 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS);
10302 set_gdbarch_register_type (gdbarch, arm_register_type);
10303 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p);
10304
10305 /* This "info float" is FPA-specific. Use the generic version if we
10306 do not have FPA. */
10307 if (gdbarch_tdep (gdbarch)->have_fpa_registers)
10308 set_gdbarch_print_float_info (gdbarch, arm_print_float_info);
10309
10310 /* Internal <-> external register number maps. */
10311 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum);
10312 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno);
10313
10314 set_gdbarch_register_name (gdbarch, arm_register_name);
10315
10316 /* Returning results. */
10317 set_gdbarch_return_value (gdbarch, arm_return_value);
10318
10319 /* Disassembly. */
10320 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm);
10321
10322 /* Minsymbol frobbing. */
10323 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special);
10324 set_gdbarch_coff_make_msymbol_special (gdbarch,
10325 arm_coff_make_msymbol_special);
10326 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol);
10327
10328 /* Thumb-2 IT block support. */
10329 set_gdbarch_adjust_breakpoint_address (gdbarch,
10330 arm_adjust_breakpoint_address);
10331
10332 /* Virtual tables. */
10333 set_gdbarch_vbit_in_delta (gdbarch, 1);
10334
10335 /* Hook in the ABI-specific overrides, if they have been registered. */
10336 gdbarch_init_osabi (info, gdbarch);
10337
10338 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg);
10339
10340 /* Add some default predicates. */
10341 if (is_m)
10342 frame_unwind_append_unwinder (gdbarch, &arm_m_exception_unwind);
10343 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind);
10344 dwarf2_append_unwinders (gdbarch);
10345 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind);
10346 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind);
10347
10348 /* Now we have tuned the configuration, set a few final things,
10349 based on what the OS ABI has told us. */
10350
10351 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI
10352 binaries are always marked. */
10353 if (tdep->arm_abi == ARM_ABI_AUTO)
10354 tdep->arm_abi = ARM_ABI_APCS;
10355
10356 /* Watchpoints are not steppable. */
10357 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
10358
10359 /* We used to default to FPA for generic ARM, but almost nobody
10360 uses that now, and we now provide a way for the user to force
10361 the model. So default to the most useful variant. */
10362 if (tdep->fp_model == ARM_FLOAT_AUTO)
10363 tdep->fp_model = ARM_FLOAT_SOFT_FPA;
10364
10365 if (tdep->jb_pc >= 0)
10366 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target);
10367
10368 /* Floating point sizes and format. */
10369 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
10370 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA)
10371 {
10372 set_gdbarch_double_format
10373 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10374 set_gdbarch_long_double_format
10375 (gdbarch, floatformats_ieee_double_littlebyte_bigword);
10376 }
10377 else
10378 {
10379 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
10380 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
10381 }
10382
10383 if (have_vfp_pseudos)
10384 {
10385 /* NOTE: These are the only pseudo registers used by
10386 the ARM target at the moment. If more are added, a
10387 little more care in numbering will be needed. */
10388
10389 int num_pseudos = 32;
10390 if (have_neon_pseudos)
10391 num_pseudos += 16;
10392 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos);
10393 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read);
10394 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write);
10395 }
10396
10397 if (tdesc_data)
10398 {
10399 set_tdesc_pseudo_register_name (gdbarch, arm_register_name);
10400
10401 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
10402
10403 /* Override tdesc_register_type to adjust the types of VFP
10404 registers for NEON. */
10405 set_gdbarch_register_type (gdbarch, arm_register_type);
10406 }
10407
10408 /* Add standard register aliases. We add aliases even for those
10409 names which are used by the current architecture - it's simpler,
10410 and does no harm, since nothing ever lists user registers. */
10411 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++)
10412 user_reg_add (gdbarch, arm_register_aliases[i].name,
10413 value_of_arm_user_reg, &arm_register_aliases[i].regnum);
10414
10415 return gdbarch;
10416 }
10417
10418 static void
10419 arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
10420 {
10421 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
10422
10423 if (tdep == NULL)
10424 return;
10425
10426 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"),
10427 (unsigned long) tdep->lowest_pc);
10428 }
10429
10430 extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */
10431
10432 void
10433 _initialize_arm_tdep (void)
10434 {
10435 struct ui_file *stb;
10436 long length;
10437 struct cmd_list_element *new_set, *new_show;
10438 const char *setname;
10439 const char *setdesc;
10440 const char *const *regnames;
10441 int numregs, i, j;
10442 static char *helptext;
10443 char regdesc[1024], *rdptr = regdesc;
10444 size_t rest = sizeof (regdesc);
10445
10446 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep);
10447
10448 arm_objfile_data_key
10449 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free);
10450
10451 /* Add ourselves to objfile event chain. */
10452 observer_attach_new_objfile (arm_exidx_new_objfile);
10453 arm_exidx_data_key
10454 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free);
10455
10456 /* Register an ELF OS ABI sniffer for ARM binaries. */
10457 gdbarch_register_osabi_sniffer (bfd_arch_arm,
10458 bfd_target_elf_flavour,
10459 arm_elf_osabi_sniffer);
10460
10461 /* Initialize the standard target descriptions. */
10462 initialize_tdesc_arm_with_m ();
10463 initialize_tdesc_arm_with_m_fpa_layout ();
10464 initialize_tdesc_arm_with_m_vfp_d16 ();
10465 initialize_tdesc_arm_with_iwmmxt ();
10466 initialize_tdesc_arm_with_vfpv2 ();
10467 initialize_tdesc_arm_with_vfpv3 ();
10468 initialize_tdesc_arm_with_neon ();
10469
10470 /* Get the number of possible sets of register names defined in opcodes. */
10471 num_disassembly_options = get_arm_regname_num_options ();
10472
10473 /* Add root prefix command for all "set arm"/"show arm" commands. */
10474 add_prefix_cmd ("arm", no_class, set_arm_command,
10475 _("Various ARM-specific commands."),
10476 &setarmcmdlist, "set arm ", 0, &setlist);
10477
10478 add_prefix_cmd ("arm", no_class, show_arm_command,
10479 _("Various ARM-specific commands."),
10480 &showarmcmdlist, "show arm ", 0, &showlist);
10481
10482 /* Sync the opcode insn printer with our register viewer. */
10483 parse_arm_disassembler_option ("reg-names-std");
10484
10485 /* Initialize the array that will be passed to
10486 add_setshow_enum_cmd(). */
10487 valid_disassembly_styles
10488 = xmalloc ((num_disassembly_options + 1) * sizeof (char *));
10489 for (i = 0; i < num_disassembly_options; i++)
10490 {
10491 numregs = get_arm_regnames (i, &setname, &setdesc, &regnames);
10492 valid_disassembly_styles[i] = setname;
10493 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc);
10494 rdptr += length;
10495 rest -= length;
10496 /* When we find the default names, tell the disassembler to use
10497 them. */
10498 if (!strcmp (setname, "std"))
10499 {
10500 disassembly_style = setname;
10501 set_arm_regname_option (i);
10502 }
10503 }
10504 /* Mark the end of valid options. */
10505 valid_disassembly_styles[num_disassembly_options] = NULL;
10506
10507 /* Create the help text. */
10508 stb = mem_fileopen ();
10509 fprintf_unfiltered (stb, "%s%s%s",
10510 _("The valid values are:\n"),
10511 regdesc,
10512 _("The default is \"std\"."));
10513 helptext = ui_file_xstrdup (stb, NULL);
10514 ui_file_delete (stb);
10515
10516 add_setshow_enum_cmd("disassembler", no_class,
10517 valid_disassembly_styles, &disassembly_style,
10518 _("Set the disassembly style."),
10519 _("Show the disassembly style."),
10520 helptext,
10521 set_disassembly_style_sfunc,
10522 NULL, /* FIXME: i18n: The disassembly style is
10523 \"%s\". */
10524 &setarmcmdlist, &showarmcmdlist);
10525
10526 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32,
10527 _("Set usage of ARM 32-bit mode."),
10528 _("Show usage of ARM 32-bit mode."),
10529 _("When off, a 26-bit PC will be used."),
10530 NULL,
10531 NULL, /* FIXME: i18n: Usage of ARM 32-bit
10532 mode is %s. */
10533 &setarmcmdlist, &showarmcmdlist);
10534
10535 /* Add a command to allow the user to force the FPU model. */
10536 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, &current_fp_model,
10537 _("Set the floating point type."),
10538 _("Show the floating point type."),
10539 _("auto - Determine the FP typefrom the OS-ABI.\n\
10540 softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\
10541 fpa - FPA co-processor (GCC compiled).\n\
10542 softvfp - Software FP with pure-endian doubles.\n\
10543 vfp - VFP co-processor."),
10544 set_fp_model_sfunc, show_fp_model,
10545 &setarmcmdlist, &showarmcmdlist);
10546
10547 /* Add a command to allow the user to force the ABI. */
10548 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string,
10549 _("Set the ABI."),
10550 _("Show the ABI."),
10551 NULL, arm_set_abi, arm_show_abi,
10552 &setarmcmdlist, &showarmcmdlist);
10553
10554 /* Add two commands to allow the user to force the assumed
10555 execution mode. */
10556 add_setshow_enum_cmd ("fallback-mode", class_support,
10557 arm_mode_strings, &arm_fallback_mode_string,
10558 _("Set the mode assumed when symbols are unavailable."),
10559 _("Show the mode assumed when symbols are unavailable."),
10560 NULL, NULL, arm_show_fallback_mode,
10561 &setarmcmdlist, &showarmcmdlist);
10562 add_setshow_enum_cmd ("force-mode", class_support,
10563 arm_mode_strings, &arm_force_mode_string,
10564 _("Set the mode assumed even when symbols are available."),
10565 _("Show the mode assumed even when symbols are available."),
10566 NULL, NULL, arm_show_force_mode,
10567 &setarmcmdlist, &showarmcmdlist);
10568
10569 /* Debugging flag. */
10570 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug,
10571 _("Set ARM debugging."),
10572 _("Show ARM debugging."),
10573 _("When on, arm-specific debugging is enabled."),
10574 NULL,
10575 NULL, /* FIXME: i18n: "ARM debugging is %s." */
10576 &setdebuglist, &showdebuglist);
10577 }
10578
10579 /* ARM-reversible process record data structures. */
10580
10581 #define ARM_INSN_SIZE_BYTES 4
10582 #define THUMB_INSN_SIZE_BYTES 2
10583 #define THUMB2_INSN_SIZE_BYTES 4
10584
10585
10586 #define INSN_S_L_BIT_NUM 20
10587
10588 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
10589 do \
10590 { \
10591 unsigned int reg_len = LENGTH; \
10592 if (reg_len) \
10593 { \
10594 REGS = XNEWVEC (uint32_t, reg_len); \
10595 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
10596 } \
10597 } \
10598 while (0)
10599
10600 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
10601 do \
10602 { \
10603 unsigned int mem_len = LENGTH; \
10604 if (mem_len) \
10605 { \
10606 MEMS = XNEWVEC (struct arm_mem_r, mem_len); \
10607 memcpy(&MEMS->len, &RECORD_BUF[0], \
10608 sizeof(struct arm_mem_r) * LENGTH); \
10609 } \
10610 } \
10611 while (0)
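/* Typical use of the helpers above (illustrative sketch only):

     uint32_t record_buf[2];

     record_buf[0] = ARM_LR_REGNUM;
     arm_insn_r->reg_rec_count = 1;
     REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count,
                record_buf);

   i.e. a decode routine fills a local scratch buffer, sets the count
   in the insn record, and lets the macro allocate and copy the final
   array.  */
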
10612
10613 /* Evaluates to non-zero when the insn has already been recorded, i.e. it has register or memory records. */
10614 #define INSN_RECORDED(ARM_RECORD) \
10615 (0 != (ARM_RECORD)->reg_rec_count || 0 != (ARM_RECORD)->mem_rec_count)
10616
10617 /* ARM memory record structure. */
10618 struct arm_mem_r
10619 {
10620 uint32_t len; /* Record length. */
10621 CORE_ADDR addr; /* Memory address. */
10622 };
10623
10624 /* An ARM instruction record contains the opcode and execution state
10625 of the current insn (filled in before entry to decode_insn()), and
10626 the list of to-be-modified registers and memory blocks (filled in
10627 on return from decode_insn()). */
10628
10629 typedef struct insn_decode_record_t
10630 {
10631 struct gdbarch *gdbarch;
10632 struct regcache *regcache;
10633 CORE_ADDR this_addr; /* Address of the insn being decoded. */
10634 uint32_t arm_insn; /* Should accommodate thumb. */
10635 uint32_t cond; /* Condition code. */
10636 uint32_t opcode; /* Insn opcode. */
10637 uint32_t decode; /* Insn decode bits. */
10638 uint32_t mem_rec_count; /* No of mem records. */
10639 uint32_t reg_rec_count; /* No of reg records. */
10640 uint32_t *arm_regs; /* Registers to be saved for this record. */
10641 struct arm_mem_r *arm_mems; /* Memory to be saved for this record. */
10642 } insn_decode_record;
10643
10644
10645 /* Checks ARM SBZ and SBO mandatory fields. */
10646
10647 static int
10648 sbo_sbz (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
10649 {
10650 uint32_t ones = bits (insn, bit_num - 1, (bit_num - 1) + (len - 1));
10651
10652 if (!len)
10653 return 1;
10654
10655 if (!sbo)
10656 ones = ~ones;
10657
10658 while (ones)
10659 {
10660 if (!(ones & sbo))
10661 {
10662 return 0;
10663 }
10664 ones = ones >> 1;
10665 }
10666 return 1;
10667 }
10668
10669 typedef enum
10670 {
10671 ARM_RECORD_STRH=1,
10672 ARM_RECORD_STRD
10673 } arm_record_strx_t;
10674
10675 typedef enum
10676 {
10677 ARM_RECORD=1,
10678 THUMB_RECORD,
10679 THUMB2_RECORD
10680 } record_type_t;
10681
10682
10683 static int
10684 arm_record_strx (insn_decode_record *arm_insn_r, uint32_t *record_buf,
10685 uint32_t *record_buf_mem, arm_record_strx_t str_type)
10686 {
10687
10688 struct regcache *reg_cache = arm_insn_r->regcache;
10689 ULONGEST u_regval[2]= {0};
10690
10691 uint32_t reg_src1 = 0, reg_src2 = 0;
10692 uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
10693 uint32_t opcode1 = 0;
10694
10695 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
10696 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
10697 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
10698
10699
10700 if (14 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
10701 {
10702 /* 1) Handle misc store, immediate offset. */
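/* For example (illustrative): `strh r1, [r2, #6]' decodes to this
   case with an 8-bit immediate of 6, so a 2-byte store at the
   address currently in r2 plus 6 is recorded.  */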
10703 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10704 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10705 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10706 regcache_raw_read_unsigned (reg_cache, reg_src1,
10707 &u_regval[0]);
10708 if (ARM_PC_REGNUM == reg_src1)
10709 {
10710 /* If R15 was used as Rn, the value read is the current PC + 8. */
10711 u_regval[0] = u_regval[0] + 8;
10712 }
10713 offset_8 = (immed_high << 4) | immed_low;
10714 /* Calculate target store address. */
10715 if (14 == arm_insn_r->opcode)
10716 {
10717 tgt_mem_addr = u_regval[0] + offset_8;
10718 }
10719 else
10720 {
10721 tgt_mem_addr = u_regval[0] - offset_8;
10722 }
10723 if (ARM_RECORD_STRH == str_type)
10724 {
10725 record_buf_mem[0] = 2;
10726 record_buf_mem[1] = tgt_mem_addr;
10727 arm_insn_r->mem_rec_count = 1;
10728 }
10729 else if (ARM_RECORD_STRD == str_type)
10730 {
10731 record_buf_mem[0] = 4;
10732 record_buf_mem[1] = tgt_mem_addr;
10733 record_buf_mem[2] = 4;
10734 record_buf_mem[3] = tgt_mem_addr + 4;
10735 arm_insn_r->mem_rec_count = 2;
10736 }
10737 }
10738 else if (12 == arm_insn_r->opcode || 8 == arm_insn_r->opcode)
10739 {
10740 /* 2) Store, register offset. */
10741 /* Get Rm. */
10742 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10743 /* Get Rn. */
10744 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10745 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10746 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10747 if (15 == reg_src2)
10748 {
10749 /* R15 was used as Rn; the architectural value is the current PC + 8. */
10750 u_regval[1] = u_regval[1] + 8;
10751 }
10752 /* Calculate target store address, Rn +/- Rm, register offset. */
10753 if (12 == arm_insn_r->opcode)
10754 {
10755 tgt_mem_addr = u_regval[0] + u_regval[1];
10756 }
10757 else
10758 {
10759 tgt_mem_addr = u_regval[1] - u_regval[0];
10760 }
10761 if (ARM_RECORD_STRH == str_type)
10762 {
10763 record_buf_mem[0] = 2;
10764 record_buf_mem[1] = tgt_mem_addr;
10765 arm_insn_r->mem_rec_count = 1;
10766 }
10767 else if (ARM_RECORD_STRD == str_type)
10768 {
10769 record_buf_mem[0] = 4;
10770 record_buf_mem[1] = tgt_mem_addr;
10771 record_buf_mem[2] = 4;
10772 record_buf_mem[3] = tgt_mem_addr + 4;
10773 arm_insn_r->mem_rec_count = 2;
10774 }
10775 }
10776 else if (11 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
10777 || 2 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10778 {
10779 /* 3) Store, immediate pre-indexed. */
10780 /* 5) Store, immediate post-indexed. */
10781 immed_low = bits (arm_insn_r->arm_insn, 0, 3);
10782 immed_high = bits (arm_insn_r->arm_insn, 8, 11);
10783 offset_8 = (immed_high << 4) | immed_low;
10784 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
10785 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10786 /* Calculate target store address, Rn +/- offset_8, immediate offset. */
10787 if (15 == arm_insn_r->opcode || 6 == arm_insn_r->opcode)
10788 {
10789 tgt_mem_addr = u_regval[0] + offset_8;
10790 }
10791 else
10792 {
10793 tgt_mem_addr = u_regval[0] - offset_8;
10794 }
10795 if (ARM_RECORD_STRH == str_type)
10796 {
10797 record_buf_mem[0] = 2;
10798 record_buf_mem[1] = tgt_mem_addr;
10799 arm_insn_r->mem_rec_count = 1;
10800 }
10801 else if (ARM_RECORD_STRD == str_type)
10802 {
10803 record_buf_mem[0] = 4;
10804 record_buf_mem[1] = tgt_mem_addr;
10805 record_buf_mem[2] = 4;
10806 record_buf_mem[3] = tgt_mem_addr + 4;
10807 arm_insn_r->mem_rec_count = 2;
10808 }
10809 /* Record Rn also as it changes. */
10810 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10811 arm_insn_r->reg_rec_count = 1;
10812 }
10813 else if (9 == arm_insn_r->opcode || 13 == arm_insn_r->opcode
10814 || 0 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10815 {
10816 /* 4) Store, register pre-indexed. */
10817 /* 6) Store, register post -indexed. */
10818 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
10819 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
10820 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
10821 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
10822 /* Calculate target store address, Rn +/- Rm, register offset. */
10823 if (13 == arm_insn_r->opcode || 4 == arm_insn_r->opcode)
10824 {
10825 tgt_mem_addr = u_regval[0] + u_regval[1];
10826 }
10827 else
10828 {
10829 tgt_mem_addr = u_regval[1] - u_regval[0];
10830 }
10831 if (ARM_RECORD_STRH == str_type)
10832 {
10833 record_buf_mem[0] = 2;
10834 record_buf_mem[1] = tgt_mem_addr;
10835 arm_insn_r->mem_rec_count = 1;
10836 }
10837 else if (ARM_RECORD_STRD == str_type)
10838 {
10839 record_buf_mem[0] = 4;
10840 record_buf_mem[1] = tgt_mem_addr;
10841 record_buf_mem[2] = 4;
10842 record_buf_mem[3] = tgt_mem_addr + 4;
10843 arm_insn_r->mem_rec_count = 2;
10844 }
10845 /* Record Rn also as it changes. */
10846 *(record_buf) = bits (arm_insn_r->arm_insn, 16, 19);
10847 arm_insn_r->reg_rec_count = 1;
10848 }
10849 return 0;
10850 }
10851
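/* Worked illustration (hypothetical values): for an STRD whose target
   address works out to 0x8000, arm_record_strx above fills the memory
   record as length/address pairs,
     record_buf_mem[0] = 4;  record_buf_mem[1] = 0x8000;
     record_buf_mem[2] = 4;  record_buf_mem[3] = 0x8004;
   and sets mem_rec_count = 2, i.e. two 4-byte blocks to be saved before
   the insn is allowed to execute.  */
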
10852 /* Handling ARM extension space insns. */
10853
10854 static int
10855 arm_record_extension_space (insn_decode_record *arm_insn_r)
10856 {
10857 uint32_t ret = 0; /* Return value: -1:record failure ; 0:success */
10858 uint32_t opcode1 = 0, opcode2 = 0, insn_op1 = 0;
10859 uint32_t record_buf[8], record_buf_mem[8];
10860 uint32_t reg_src1 = 0;
10861 uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
10862 struct regcache *reg_cache = arm_insn_r->regcache;
10863 ULONGEST u_regval = 0;
10864
10865 gdb_assert (!INSN_RECORDED(arm_insn_r));
10866 /* Handle unconditional insn extension space. */
10867
10868 opcode1 = bits (arm_insn_r->arm_insn, 20, 27);
10869 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10870 if (arm_insn_r->cond)
10871 {
10872 /* PLD has no effect on the architectural state; it just affects
10873 the caches. */
10874 if (5 == ((opcode1 & 0xE0) >> 5))
10875 {
10876 /* BLX(1) */
10877 record_buf[0] = ARM_PS_REGNUM;
10878 record_buf[1] = ARM_LR_REGNUM;
10879 arm_insn_r->reg_rec_count = 2;
10880 }
10881 /* STC2, LDC2, MCR2, MRC2, CDP2: <TBD>, co-processor insn. */
10882 }
10883
10884
10885 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
10886 if (3 == opcode1 && bit (arm_insn_r->arm_insn, 4))
10887 {
10888 ret = -1;
10889 /* Undefined instruction on ARM V5; need to handle if later
10890 versions define it. */
10891 }
10892
10893 opcode1 = bits (arm_insn_r->arm_insn, 24, 27);
10894 opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
10895 insn_op1 = bits (arm_insn_r->arm_insn, 20, 23);
10896
10897 /* Handle arithmetic insn extension space. */
10898 if (!opcode1 && 9 == opcode2 && 1 != arm_insn_r->cond
10899 && !INSN_RECORDED(arm_insn_r))
10900 {
10901 /* Handle MLA(S) and MUL(S). */
10902 if (0 <= insn_op1 && 3 >= insn_op1)
10903 {
10904 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10905 record_buf[1] = ARM_PS_REGNUM;
10906 arm_insn_r->reg_rec_count = 2;
10907 }
10908 else if (4 <= insn_op1 && 15 >= insn_op1)
10909 {
10910 /* Handle SMLAL(S), SMULL(S), UMLAL(S), UMULL(S). */
10911 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
10912 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10913 record_buf[2] = ARM_PS_REGNUM;
10914 arm_insn_r->reg_rec_count = 3;
10915 }
10916 }
10917
10918 opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
10919 opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
10920 insn_op1 = bits (arm_insn_r->arm_insn, 21, 22);
10921
10922 /* Handle control insn extension space. */
10923
10924 if (!opcode1 && 2 == opcode2 && !bit (arm_insn_r->arm_insn, 20)
10925 && 1 != arm_insn_r->cond && !INSN_RECORDED(arm_insn_r))
10926 {
10927 if (!bit (arm_insn_r->arm_insn,25))
10928 {
10929 if (!bits (arm_insn_r->arm_insn, 4, 7))
10930 {
10931 if ((0 == insn_op1) || (2 == insn_op1))
10932 {
10933 /* MRS. */
10934 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10935 arm_insn_r->reg_rec_count = 1;
10936 }
10937 else if (1 == insn_op1)
10938 {
10939 /* CPSR is going to be changed. */
10940 record_buf[0] = ARM_PS_REGNUM;
10941 arm_insn_r->reg_rec_count = 1;
10942 }
10943 else if (3 == insn_op1)
10944 {
10945 /* SPSR is going to be changed. */
10946 /* We need to get SPSR value, which is yet to be done. */
10947 printf_unfiltered (_("Process record does not support "
10948 "instruction 0x%0x at address %s.\n"),
10949 arm_insn_r->arm_insn,
10950 paddress (arm_insn_r->gdbarch,
10951 arm_insn_r->this_addr));
10952 return -1;
10953 }
10954 }
10955 else if (1 == bits (arm_insn_r->arm_insn, 4, 7))
10956 {
10957 if (1 == insn_op1)
10958 {
10959 /* BX. */
10960 record_buf[0] = ARM_PS_REGNUM;
10961 arm_insn_r->reg_rec_count = 1;
10962 }
10963 else if (3 == insn_op1)
10964 {
10965 /* CLZ. */
10966 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
10967 arm_insn_r->reg_rec_count = 1;
10968 }
10969 }
10970 else if (3 == bits (arm_insn_r->arm_insn, 4, 7))
10971 {
10972 /* BLX. */
10973 record_buf[0] = ARM_PS_REGNUM;
10974 record_buf[1] = ARM_LR_REGNUM;
10975 arm_insn_r->reg_rec_count = 2;
10976 }
10977 else if (5 == bits (arm_insn_r->arm_insn, 4, 7))
10978 {
10979 /* QADD, QSUB, QDADD, QDSUB */
10980 record_buf[0] = ARM_PS_REGNUM;
10981 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
10982 arm_insn_r->reg_rec_count = 2;
10983 }
10984 else if (7 == bits (arm_insn_r->arm_insn, 4, 7))
10985 {
10986 /* BKPT. */
10987 record_buf[0] = ARM_PS_REGNUM;
10988 record_buf[1] = ARM_LR_REGNUM;
10989 arm_insn_r->reg_rec_count = 2;
10990
10991 /* Save SPSR also; how? */
10992 printf_unfiltered (_("Process record does not support "
10993 "instruction 0x%0x at address %s.\n"),
10994 arm_insn_r->arm_insn,
10995 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
10996 return -1;
10997 }
10998 else if(8 == bits (arm_insn_r->arm_insn, 4, 7)
10999 || 10 == bits (arm_insn_r->arm_insn, 4, 7)
11000 || 12 == bits (arm_insn_r->arm_insn, 4, 7)
11001 || 14 == bits (arm_insn_r->arm_insn, 4, 7)
11002 )
11003 {
11004 if (0 == insn_op1 || 1 == insn_op1)
11005 {
11006 /* SMLA<x><y>, SMLAW<y>, SMULW<y>. */
11007 /* We don't do the optimization for SMULW<y> where we
11008 need only Rd. */
11009 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11010 record_buf[1] = ARM_PS_REGNUM;
11011 arm_insn_r->reg_rec_count = 2;
11012 }
11013 else if (2 == insn_op1)
11014 {
11015 /* SMLAL<x><y>. */
11016 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11017 record_buf[1] = bits (arm_insn_r->arm_insn, 16, 19);
11018 arm_insn_r->reg_rec_count = 2;
11019 }
11020 else if (3 == insn_op1)
11021 {
11022 /* SMUL<x><y>. */
11023 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11024 arm_insn_r->reg_rec_count = 1;
11025 }
11026 }
11027 }
11028 else
11029 {
11030 /* MSR : immediate form. */
11031 if (1 == insn_op1)
11032 {
11033 /* CPSR is going to be changed. */
11034 record_buf[0] = ARM_PS_REGNUM;
11035 arm_insn_r->reg_rec_count = 1;
11036 }
11037 else if (3 == insn_op1)
11038 {
11039 /* SPSR is going to be changed. */
11040 /* We need to get the SPSR value, which is yet to be done. */
11041 printf_unfiltered (_("Process record does not support "
11042 "instruction 0x%0x at address %s.\n"),
11043 arm_insn_r->arm_insn,
11044 paddress (arm_insn_r->gdbarch,
11045 arm_insn_r->this_addr));
11046 return -1;
11047 }
11048 }
11049 }
11050
11051 opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
11052 opcode2 = bits (arm_insn_r->arm_insn, 20, 24);
11053 insn_op1 = bits (arm_insn_r->arm_insn, 5, 6);
11054
11055 /* Handle load/store insn extension space. */
11056
11057 if (!opcode1 && bit (arm_insn_r->arm_insn, 7)
11058 && bit (arm_insn_r->arm_insn, 4) && 1 != arm_insn_r->cond
11059 && !INSN_RECORDED(arm_insn_r))
11060 {
11061 /* SWP/SWPB. */
11062 if (0 == insn_op1)
11063 {
11064 /* These insns change both a register and memory. */
11065 /* SWP or SWPB insn. */
11066 /* Get memory address given by Rn. */
11067 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11068 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11069 /* SWP insn?  It swaps a word. */
11070 if (8 == arm_insn_r->opcode)
11071 {
11072 record_buf_mem[0] = 4;
11073 }
11074 else
11075 {
11076 /* SWPB insn, swaps only byte. */
11077 record_buf_mem[0] = 1;
11078 }
11079 record_buf_mem[1] = u_regval;
11080 arm_insn_r->mem_rec_count = 1;
11081 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11082 arm_insn_r->reg_rec_count = 1;
11083 }
11084 else if (1 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11085 {
11086 /* STRH. */
11087 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11088 ARM_RECORD_STRH);
11089 }
11090 else if (2 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11091 {
11092 /* LDRD. */
11093 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11094 record_buf[1] = record_buf[0] + 1;
11095 arm_insn_r->reg_rec_count = 2;
11096 }
11097 else if (3 == insn_op1 && !bit (arm_insn_r->arm_insn, 20))
11098 {
11099 /* STRD. */
11100 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11101 ARM_RECORD_STRD);
11102 }
11103 else if (bit (arm_insn_r->arm_insn, 20) && insn_op1 <= 3)
11104 {
11105 /* LDRH, LDRSB, LDRSH. */
11106 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11107 arm_insn_r->reg_rec_count = 1;
11108 }
11109
11110 }
11111
11112 opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
11113 if (24 == opcode1 && bit (arm_insn_r->arm_insn, 21)
11114 && !INSN_RECORDED(arm_insn_r))
11115 {
11116 ret = -1;
11117 /* Handle coprocessor insn extension space. */
11118 }
11119
11120 /* To be done for ARMv5 and later; as of now we return -1. */
11121 if (-1 == ret)
11122 printf_unfiltered (_("Process record does not support instruction 0x%0x "
11123 "at address %s.\n"), arm_insn_r->arm_insn,
11124 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11125
11126
11127 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11128 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11129
11130 return ret;
11131 }
11132
11133 /* Handling opcode 000 insns. */
11134
11135 static int
11136 arm_record_data_proc_misc_ld_str (insn_decode_record *arm_insn_r)
11137 {
11138 struct regcache *reg_cache = arm_insn_r->regcache;
11139 uint32_t record_buf[8], record_buf_mem[8];
11140 ULONGEST u_regval[2] = {0};
11141
11142 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11143 uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
11144 uint32_t opcode1 = 0;
11145
11146 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11147 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11148 opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
11149
11150 /* Data processing insn / multiply insn. */
11151 if (9 == arm_insn_r->decode
11152 && ((4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11153 || (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)))
11154 {
11155 /* Handle multiply instructions. */
11156 /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL. */
11157 if (0 == arm_insn_r->opcode || 1 == arm_insn_r->opcode)
11158 {
11159 /* Handle MLA and MUL. */
11160 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11161 record_buf[1] = ARM_PS_REGNUM;
11162 arm_insn_r->reg_rec_count = 2;
11163 }
11164 else if (4 <= arm_insn_r->opcode && 7 >= arm_insn_r->opcode)
11165 {
11166 /* Handle SMLAL, SMULL, UMLAL, UMULL. */
11167 record_buf[0] = bits (arm_insn_r->arm_insn, 16, 19);
11168 record_buf[1] = bits (arm_insn_r->arm_insn, 12, 15);
11169 record_buf[2] = ARM_PS_REGNUM;
11170 arm_insn_r->reg_rec_count = 3;
11171 }
11172 }
11173 else if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11174 && (11 == arm_insn_r->decode || 13 == arm_insn_r->decode))
11175 {
11176 /* Handle misc load insns, as 20th bit (L = 1). */
11177 /* An LDR insn is capable of branching: when an LDR insn
11178 whose destination is R15 is paired with MOV LR, PC,
11179 it emulates a branch and link insn, and hence we
11180 need to save CPSR and PC as well. I am not sure this is the right
11181 place; the opcode = 010 LDR insn makes this happen, if R15 was
11182 used. */
11183 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11184 if (15 != reg_dest)
11185 {
11186 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11187 arm_insn_r->reg_rec_count = 1;
11188 }
11189 else
11190 {
11191 record_buf[0] = reg_dest;
11192 record_buf[1] = ARM_PS_REGNUM;
11193 arm_insn_r->reg_rec_count = 2;
11194 }
11195 }
11196 else if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11197 && sbo_sbz (arm_insn_r->arm_insn, 5, 12, 0)
11198 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11199 && 2 == bits (arm_insn_r->arm_insn, 20, 21))
11200 {
11201 /* Handle MSR insn. */
11202 if (9 == arm_insn_r->opcode)
11203 {
11204 /* CPSR is going to be changed. */
11205 record_buf[0] = ARM_PS_REGNUM;
11206 arm_insn_r->reg_rec_count = 1;
11207 }
11208 else
11209 {
11210 /* SPSR is going to be changed. */
11211 /* How to read SPSR value? */
11212 printf_unfiltered (_("Process record does not support instruction "
11213 "0x%0x at address %s.\n"),
11214 arm_insn_r->arm_insn,
11215 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11216 return -1;
11217 }
11218 }
11219 else if (9 == arm_insn_r->decode
11220 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11221 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11222 {
11223 /* Handling SWP, SWPB. */
11224 /* These insns change both a register and memory. */
11225 /* SWP or SWPB insn. */
11226
11227 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11228 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11229 /* SWP insn?  It swaps a word. */
11230 if (8 == arm_insn_r->opcode)
11231 {
11232 record_buf_mem[0] = 4;
11233 }
11234 else
11235 {
11236 /* SWPB insn, swaps only byte. */
11237 record_buf_mem[0] = 1;
11238 }
11239 record_buf_mem[1] = u_regval[0];
11240 arm_insn_r->mem_rec_count = 1;
11241 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11242 arm_insn_r->reg_rec_count = 1;
11243 }
11244 else if (3 == arm_insn_r->decode && 0x12 == opcode1
11245 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11246 {
11247 /* Handle BLX, branch and link/exchange. */
11248 if (9 == arm_insn_r->opcode)
11249 {
11250 /* Branch is chosen by setting the T bit of CPSR from bit[0] of Rm,
11251 and R14 stores the return address. */
11252 record_buf[0] = ARM_PS_REGNUM;
11253 record_buf[1] = ARM_LR_REGNUM;
11254 arm_insn_r->reg_rec_count = 2;
11255 }
11256 }
11257 else if (7 == arm_insn_r->decode && 0x12 == opcode1)
11258 {
11259 /* Handle enhanced software breakpoint insn, BKPT. */
11260 /* CPSR is changed so that execution continues in ARM state, with normal
11261 interrupts disabled, entering abort mode. */
11262 /* PC is set according to the high vector configuration. */
11263 /* If the user hits a breakpoint and types reverse, we need
11264 to go back with the previous CPSR and
11265 Program Counter. */
11266 record_buf[0] = ARM_PS_REGNUM;
11267 record_buf[1] = ARM_LR_REGNUM;
11268 arm_insn_r->reg_rec_count = 2;
11269
11270 /* Save SPSR also; how? */
11271 printf_unfiltered (_("Process record does not support instruction "
11272 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11273 paddress (arm_insn_r->gdbarch,
11274 arm_insn_r->this_addr));
11275 return -1;
11276 }
11277 else if (11 == arm_insn_r->decode
11278 && !bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11279 {
11280 /* Handle enhanced store insns and DSP insns (e.g. LDRD). */
11281
11282 /* Handle str(x) insn */
11283 arm_record_strx(arm_insn_r, &record_buf[0], &record_buf_mem[0],
11284 ARM_RECORD_STRH);
11285 }
11286 else if (1 == arm_insn_r->decode && 0x12 == opcode1
11287 && sbo_sbz (arm_insn_r->arm_insn, 9, 12, 1))
11288 {
11289 /* Handle BX, branch and link/exchange. */
11290 /* Branch is chosen by setting the T bit of CPSR from bit[0] of Rm. */
11291 record_buf[0] = ARM_PS_REGNUM;
11292 arm_insn_r->reg_rec_count = 1;
11293 }
11294 else if (1 == arm_insn_r->decode && 0x16 == opcode1
11295 && sbo_sbz (arm_insn_r->arm_insn, 9, 4, 1)
11296 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1))
11297 {
11298 /* Count leading zeros: CLZ. */
11299 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11300 arm_insn_r->reg_rec_count = 1;
11301 }
11302 else if (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)
11303 && (8 == arm_insn_r->opcode || 10 == arm_insn_r->opcode)
11304 && sbo_sbz (arm_insn_r->arm_insn, 17, 4, 1)
11305 && sbo_sbz (arm_insn_r->arm_insn, 1, 12, 0)
11306 )
11307 {
11308 /* Handle MRS insn. */
11309 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11310 arm_insn_r->reg_rec_count = 1;
11311 }
11312 else if (arm_insn_r->opcode <= 15)
11313 {
11314 /* Normal data processing insns. */
11315 /* In all of the 11 shifter operand modes, the insn modifies the destination
11316 register, which is specified by bits 12-15 (Rd). */
11317 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11318 record_buf[1] = ARM_PS_REGNUM;
11319 arm_insn_r->reg_rec_count = 2;
11320 }
11321 else
11322 {
11323 return -1;
11324 }
11325
11326 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11327 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11328 return 0;
11329 }
11330
11331 /* Handling opcode 001 insns. */
11332
11333 static int
11334 arm_record_data_proc_imm (insn_decode_record *arm_insn_r)
11335 {
11336 uint32_t record_buf[8], record_buf_mem[8];
11337
11338 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11339 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11340
11341 if ((9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode)
11342 && 2 == bits (arm_insn_r->arm_insn, 20, 21)
11343 && sbo_sbz (arm_insn_r->arm_insn, 13, 4, 1)
11344 )
11345 {
11346 /* Handle MSR insn. */
11347 if (9 == arm_insn_r->opcode)
11348 {
11349 /* CPSR is going to be changed. */
11350 record_buf[0] = ARM_PS_REGNUM;
11351 arm_insn_r->reg_rec_count = 1;
11352 }
11353 else
11354 {
11355 /* SPSR is going to be changed. */
11356 }
11357 }
11358 else if (arm_insn_r->opcode <= 15)
11359 {
11360 /* Normal data processing insns. */
11361 /* In all of the 11 shifter operand modes, the insn modifies the destination
11362 register, which is specified by bits 12-15 (Rd). */
11363 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11364 record_buf[1] = ARM_PS_REGNUM;
11365 arm_insn_r->reg_rec_count = 2;
11366 }
11367 else
11368 {
11369 return -1;
11370 }
11371
11372 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11373 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11374 return 0;
11375 }
11376
11377 /* Handling opcode 010 insns. */
11378
11379 static int
11380 arm_record_ld_st_imm_offset (insn_decode_record *arm_insn_r)
11381 {
11382 struct regcache *reg_cache = arm_insn_r->regcache;
11383
11384 uint32_t reg_src1 = 0 , reg_dest = 0;
11385 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11386 uint32_t record_buf[8], record_buf_mem[8];
11387
11388 ULONGEST u_regval = 0;
11389
11390 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11391 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11392
11393 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11394 {
11395 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11396 /* An LDR insn is capable of branching: when an LDR insn
11397 whose destination is R15 is paired with MOV LR, PC,
11398 it emulates a branch and link insn, and hence we
11399 need to save CPSR and PC as well. */
11400 if (ARM_PC_REGNUM != reg_dest)
11401 {
11402 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11403 arm_insn_r->reg_rec_count = 1;
11404 }
11405 else
11406 {
11407 record_buf[0] = reg_dest;
11408 record_buf[1] = ARM_PS_REGNUM;
11409 arm_insn_r->reg_rec_count = 2;
11410 }
11411 }
11412 else
11413 {
11414 /* Store, immediate offset, immediate pre-indexed,
11415 immediate post-indexed. */
11416 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11417 offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
11418 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
11419 /* U == 1 */
11420 if (bit (arm_insn_r->arm_insn, 23))
11421 {
11422 tgt_mem_addr = u_regval + offset_12;
11423 }
11424 else
11425 {
11426 tgt_mem_addr = u_regval - offset_12;
11427 }
11428
11429 switch (arm_insn_r->opcode)
11430 {
11431 /* STR. */
11432 case 8:
11433 case 12:
11434 /* STR. */
11435 case 9:
11436 case 13:
11437 /* STRT. */
11438 case 1:
11439 case 5:
11440 /* STR. */
11441 case 4:
11442 case 0:
11443 record_buf_mem[0] = 4;
11444 break;
11445
11446 /* STRB. */
11447 case 10:
11448 case 14:
11449 /* STRB. */
11450 case 11:
11451 case 15:
11452 /* STRBT. */
11453 case 3:
11454 case 7:
11455 /* STRB. */
11456 case 2:
11457 case 6:
11458 record_buf_mem[0] = 1;
11459 break;
11460
11461 default:
11462 gdb_assert_not_reached ("no decoding pattern found");
11463 break;
11464 }
11465 record_buf_mem[1] = tgt_mem_addr;
11466 arm_insn_r->mem_rec_count = 1;
11467
11468 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11469 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11470 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11471 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11472 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11473 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11474 )
11475 {
11476 /* We are handling pre-indexed and post-indexed modes,
11477 where Rn is going to be changed. */
11478 record_buf[0] = reg_src1;
11479 arm_insn_r->reg_rec_count = 1;
11480 }
11481 }
11482
11483 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11484 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11485 return 0;
11486 }
11487
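/* Worked illustration (hypothetical register contents): for
   STR r1, [r2, #8] with R2 = 0x1000, the opcode field (bits 21-24) is
   12, offset_12 is 8 and the U bit is set, so tgt_mem_addr = 0x1008 and
   one 4-byte memory block is recorded; since this plain immediate-offset
   form does not write back, Rn itself is not added to the register
   records.  */
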
11488 /* Handling opcode 011 insns. */
11489
11490 static int
11491 arm_record_ld_st_reg_offset (insn_decode_record *arm_insn_r)
11492 {
11493 struct regcache *reg_cache = arm_insn_r->regcache;
11494
11495 uint32_t shift_imm = 0;
11496 uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
11497 uint32_t offset_12 = 0, tgt_mem_addr = 0;
11498 uint32_t record_buf[8], record_buf_mem[8];
11499
11500 LONGEST s_word;
11501 ULONGEST u_regval[2];
11502
11503 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
11504 arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
11505
11506 /* Handle enhanced store insns and LDRD DSP insn,
11507 order begins according to addressing modes for store insns
11508 STRH insn. */
11509
11510 /* LDR or STR? */
11511 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11512 {
11513 reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
11514 /* An LDR insn is capable of branching: when an LDR insn
11515 whose destination is R15 is paired with MOV LR, PC,
11516 it emulates a branch and link insn, and hence we
11517 need to save CPSR and PC as well. */
11518 if (15 != reg_dest)
11519 {
11520 record_buf[0] = bits (arm_insn_r->arm_insn, 12, 15);
11521 arm_insn_r->reg_rec_count = 1;
11522 }
11523 else
11524 {
11525 record_buf[0] = reg_dest;
11526 record_buf[1] = ARM_PS_REGNUM;
11527 arm_insn_r->reg_rec_count = 2;
11528 }
11529 }
11530 else
11531 {
11532 if (! bits (arm_insn_r->arm_insn, 4, 11))
11533 {
11534 /* Store insn, register offset and register pre-indexed,
11535 register post-indexed. */
11536 /* Get Rm. */
11537 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11538 /* Get Rn. */
11539 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11540 regcache_raw_read_unsigned (reg_cache, reg_src1
11541 , &u_regval[0]);
11542 regcache_raw_read_unsigned (reg_cache, reg_src2
11543 , &u_regval[1]);
11544 if (15 == reg_src2)
11545 {
11546 /* R15 was used as Rn; the architectural value is the current PC + 8. */
11547 /* Pre-indexed mode doesn't reach here; illegal insn. */
11548 u_regval[1] = u_regval[1] + 8;
11549 }
11550 /* Calculate target store address, Rn +/- Rm, register offset. */
11551 /* U == 1. */
11552 if (bit (arm_insn_r->arm_insn, 23))
11553 {
11554 tgt_mem_addr = u_regval[0] + u_regval[1];
11555 }
11556 else
11557 {
11558 tgt_mem_addr = u_regval[1] - u_regval[0];
11559 }
11560
11561 switch (arm_insn_r->opcode)
11562 {
11563 /* STR. */
11564 case 8:
11565 case 12:
11566 /* STR. */
11567 case 9:
11568 case 13:
11569 /* STRT. */
11570 case 1:
11571 case 5:
11572 /* STR. */
11573 case 0:
11574 case 4:
11575 record_buf_mem[0] = 4;
11576 break;
11577
11578 /* STRB. */
11579 case 10:
11580 case 14:
11581 /* STRB. */
11582 case 11:
11583 case 15:
11584 /* STRBT. */
11585 case 3:
11586 case 7:
11587 /* STRB. */
11588 case 2:
11589 case 6:
11590 record_buf_mem[0] = 1;
11591 break;
11592
11593 default:
11594 gdb_assert_not_reached ("no decoding pattern found");
11595 break;
11596 }
11597 record_buf_mem[1] = tgt_mem_addr;
11598 arm_insn_r->mem_rec_count = 1;
11599
11600 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11601 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11602 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11603 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11604 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11605 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11606 )
11607 {
11608 /* Rn is going to be changed in pre-indexed mode and
11609 post-indexed mode as well. */
11610 record_buf[0] = reg_src2;
11611 arm_insn_r->reg_rec_count = 1;
11612 }
11613 }
11614 else
11615 {
11616 /* Store insn, scaled register offset; scaled pre-indexed. */
11617 offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
11618 /* Get Rm. */
11619 reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
11620 /* Get Rn. */
11621 reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
11622 /* Get shift_imm. */
11623 shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
11624 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11625 regcache_raw_read_signed (reg_cache, reg_src1, &s_word);
11626 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11627 /* Offset_12 used as shift. */
11628 switch (offset_12)
11629 {
11630 case 0:
11631 /* Offset_12 used as index. */
11632 offset_12 = u_regval[0] << shift_imm;
11633 break;
11634
11635 case 1:
11636 offset_12 = (!shift_imm) ? 0 : u_regval[0] >> shift_imm;
11637 break;
11638
11639 case 2:
11640 if (!shift_imm)
11641 {
11642 if (bit (u_regval[0], 31))
11643 {
11644 offset_12 = 0xFFFFFFFF;
11645 }
11646 else
11647 {
11648 offset_12 = 0;
11649 }
11650 }
11651 else
11652 {
11653 /* This is arithmetic shift. */
11654 offset_12 = s_word >> shift_imm;
11655 }
11656 break;
11657
11658 case 3:
11659 if (!shift_imm)
11660 {
11661 regcache_raw_read_unsigned (reg_cache, ARM_PS_REGNUM,
11662 &u_regval[1]);
11663 /* Get C flag value and shift it by 31. */
11664 offset_12 = (((bit (u_regval[1], 29)) << 31) \
11665 | (u_regval[0]) >> 1);
11666 }
11667 else
11668 {
11669 offset_12 = (u_regval[0] >> shift_imm) \
11670 | (u_regval[0] <<
11671 (32 - shift_imm));
11672 }
11673 break;
11674
11675 default:
11676 gdb_assert_not_reached ("no decoding pattern found");
11677 break;
11678 }
11679
11680 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
11681 /* bit U set. */
11682 if (bit (arm_insn_r->arm_insn, 23))
11683 {
11684 tgt_mem_addr = u_regval[1] + offset_12;
11685 }
11686 else
11687 {
11688 tgt_mem_addr = u_regval[1] - offset_12;
11689 }
11690
11691 switch (arm_insn_r->opcode)
11692 {
11693 /* STR. */
11694 case 8:
11695 case 12:
11696 /* STR. */
11697 case 9:
11698 case 13:
11699 /* STRT. */
11700 case 1:
11701 case 5:
11702 /* STR. */
11703 case 0:
11704 case 4:
11705 record_buf_mem[0] = 4;
11706 break;
11707
11708 /* STRB. */
11709 case 10:
11710 case 14:
11711 /* STRB. */
11712 case 11:
11713 case 15:
11714 /* STRBT. */
11715 case 3:
11716 case 7:
11717 /* STRB. */
11718 case 2:
11719 case 6:
11720 record_buf_mem[0] = 1;
11721 break;
11722
11723 default:
11724 gdb_assert_not_reached ("no decoding pattern found");
11725 break;
11726 }
11727 record_buf_mem[1] = tgt_mem_addr;
11728 arm_insn_r->mem_rec_count = 1;
11729
11730 if (9 == arm_insn_r->opcode || 11 == arm_insn_r->opcode
11731 || 13 == arm_insn_r->opcode || 15 == arm_insn_r->opcode
11732 || 0 == arm_insn_r->opcode || 2 == arm_insn_r->opcode
11733 || 4 == arm_insn_r->opcode || 6 == arm_insn_r->opcode
11734 || 1 == arm_insn_r->opcode || 3 == arm_insn_r->opcode
11735 || 5 == arm_insn_r->opcode || 7 == arm_insn_r->opcode
11736 )
11737 {
11738 /* Rn is going to be changed in register scaled pre-indexed
11739 mode, and scaled post-indexed mode. */
11740 record_buf[0] = reg_src2;
11741 arm_insn_r->reg_rec_count = 1;
11742 }
11743 }
11744 }
11745
11746 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11747 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11748 return 0;
11749 }
11750
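/* Worked illustration (hypothetical register contents): for
   STR r0, [r1, r2, LSL #2] with R1 = 0x4000 and R2 = 3, the shift type
   (bits 5-6) is 0, shift_imm is 2, so the scaled offset is 3 << 2 = 12
   and one 4-byte block at 0x400C is recorded; as this offset form does
   no write-back, Rn is not added to the register records.  */
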
11751 /* Handling opcode 100 insns. */
11752
11753 static int
11754 arm_record_ld_st_multiple (insn_decode_record *arm_insn_r)
11755 {
11756 struct regcache *reg_cache = arm_insn_r->regcache;
11757
11758 uint32_t register_list[16] = {0}, register_count = 0, register_bits = 0;
11759 uint32_t reg_src1 = 0, addr_mode = 0, no_of_regs = 0;
11760 uint32_t start_address = 0, index = 0;
11761 uint32_t record_buf[24], record_buf_mem[48];
11762
11763 ULONGEST u_regval[2] = {0};
11764
11765 /* This mode is exclusively for load and store multiple. */
11766 /* Handle increment after/before and decrement after/before modes;
11767 Rn changes depending on the W bit, but as of now we store Rn too,
11768 without optimization. */
11769
11770 if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
11771 {
11772 /* LDM (1,2,3) where LDM (3) changes CPSR too. */
11773
11774 if (bit (arm_insn_r->arm_insn, 20) && !bit (arm_insn_r->arm_insn, 22))
11775 {
11776 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11777 no_of_regs = 15;
11778 }
11779 else
11780 {
11781 register_bits = bits (arm_insn_r->arm_insn, 0, 14);
11782 no_of_regs = 14;
11783 }
11784 /* Get Rn. */
11785 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11786 while (register_bits)
11787 {
11788 if (register_bits & 0x00000001)
11789 register_list[register_count++] = 1;
11790 register_bits = register_bits >> 1;
11791 }
11792
11793 /* Extra space for the base register and CPSR; without optimization. */
11794 record_buf[register_count] = reg_src1;
11795 record_buf[register_count + 1] = ARM_PS_REGNUM;
11796 arm_insn_r->reg_rec_count = register_count + 2;
11797
11798 for (register_count = 0; register_count < no_of_regs; register_count++)
11799 {
11800 if (register_list[register_count])
11801 {
11802 /* Register_count gives the total number of registers
11803 and doubles as the register number. */
11804 record_buf[index] = register_count;
11805 index++;
11806 }
11807 }
11808
11809 }
11810 else
11811 {
11812 /* It handles both STM(1) and STM(2). */
11813 addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
11814
11815 register_bits = bits (arm_insn_r->arm_insn, 0, 15);
11816 /* Get Rn. */
11817 reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
11818 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
11819 while (register_bits)
11820 {
11821 if (register_bits & 0x00000001)
11822 register_count++;
11823 register_bits = register_bits >> 1;
11824 }
11825
11826 switch (addr_mode)
11827 {
11828 /* Decrement after. */
11829 case 0:
11830 start_address = (u_regval[0]) - (register_count * 4) + 4;
11831 arm_insn_r->mem_rec_count = register_count;
11832 while (register_count)
11833 {
11834 record_buf_mem[(register_count * 2) - 1] = start_address;
11835 record_buf_mem[(register_count * 2) - 2] = 4;
11836 start_address = start_address + 4;
11837 register_count--;
11838 }
11839 break;
11840
11841 /* Increment after. */
11842 case 1:
11843 start_address = u_regval[0];
11844 arm_insn_r->mem_rec_count = register_count;
11845 while (register_count)
11846 {
11847 record_buf_mem[(register_count * 2) - 1] = start_address;
11848 record_buf_mem[(register_count * 2) - 2] = 4;
11849 start_address = start_address + 4;
11850 register_count--;
11851 }
11852 break;
11853
11854 /* Decrement before. */
11855 case 2:
11856
11857 start_address = (u_regval[0]) - (register_count * 4);
11858 arm_insn_r->mem_rec_count = register_count;
11859 while (register_count)
11860 {
11861 record_buf_mem[(register_count * 2) - 1] = start_address;
11862 record_buf_mem[(register_count * 2) - 2] = 4;
11863 start_address = start_address + 4;
11864 register_count--;
11865 }
11866 break;
11867
11868 /* Increment before. */
11869 case 3:
11870 start_address = u_regval[0] + 4;
11871 arm_insn_r->mem_rec_count = register_count;
11872 while (register_count)
11873 {
11874 record_buf_mem[(register_count * 2) - 1] = start_address;
11875 record_buf_mem[(register_count * 2) - 2] = 4;
11876 start_address = start_address + 4;
11877 register_count--;
11878 }
11879 break;
11880
11881 default:
11882 gdb_assert_not_reached ("no decoding pattern found");
11883 break;
11884 }
11885
11886 /* Base register also changes; based on condition and W bit. */
11887 /* We save it anyway without optimization. */
11888 record_buf[0] = reg_src1;
11889 arm_insn_r->reg_rec_count = 1;
11890 }
11891
11892 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11893 MEM_ALLOC (arm_insn_r->arm_mems, arm_insn_r->mem_rec_count, record_buf_mem);
11894 return 0;
11895 }
11896
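/* Worked illustration (hypothetical values): for STMDB sp!, {r0, r1, lr}
   with SP = 0x2000, register_count is 3 and addr_mode (bits 23-24) is 2,
   i.e. decrement before, so start_address = 0x2000 - 12 = 0x1FF4 and
   three 4-byte blocks at 0x1FF4, 0x1FF8 and 0x1FFC are recorded, plus
   the base register itself since write-back may change it.  */
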
11897 /* Handling opcode 101 insns. */
11898
11899 static int
11900 arm_record_b_bl (insn_decode_record *arm_insn_r)
11901 {
11902 uint32_t record_buf[8];
11903
11904 /* Handle B, BL, BLX(1) insns. */
11905 /* B simply branches so we do nothing here. */
11906 /* Note: BLX(1) doesn't fall here; it falls into the
11907 extension space instead. */
11908 if (bit (arm_insn_r->arm_insn, 24))
11909 {
11910 record_buf[0] = ARM_LR_REGNUM;
11911 arm_insn_r->reg_rec_count = 1;
11912 }
11913
11914 REG_ALLOC (arm_insn_r->arm_regs, arm_insn_r->reg_rec_count, record_buf);
11915
11916 return 0;
11917 }
11918
11919 /* Handling opcode 110 insns. */
11920
11921 static int
11922 arm_record_coproc (insn_decode_record *arm_insn_r)
11923 {
11924 printf_unfiltered (_("Process record does not support instruction "
11925 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11926 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11927
11928 return -1;
11929 }
11930
11931 /* Handling opcode 111 insns. */
11932
11933 static int
11934 arm_record_coproc_data_proc (insn_decode_record *arm_insn_r)
11935 {
11936 struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
11937 struct regcache *reg_cache = arm_insn_r->regcache;
11938 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
11939
11940 /* Handle the SWI insn; system calls are handled here. */
11941
11942 arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
11943 if (15 == arm_insn_r->opcode)
11944 {
11945 /* Handle arm syscall insn. */
11946 if (tdep->arm_swi_record != NULL)
11947 {
11948 ret = tdep->arm_swi_record(reg_cache);
11949 }
11950 else
11951 {
11952 printf_unfiltered (_("no syscall record support\n"));
11953 ret = -1;
11954 }
11955 }
11956
11957 printf_unfiltered (_("Process record does not support instruction "
11958 "0x%0x at address %s.\n"),arm_insn_r->arm_insn,
11959 paddress (arm_insn_r->gdbarch, arm_insn_r->this_addr));
11960 return ret;
11961 }
11962
11963 /* Handling opcode 000 insns. */
11964
11965 static int
11966 thumb_record_shift_add_sub (insn_decode_record *thumb_insn_r)
11967 {
11968 uint32_t record_buf[8];
11969 uint32_t reg_src1 = 0;
11970
11971 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
11972
11973 record_buf[0] = ARM_PS_REGNUM;
11974 record_buf[1] = reg_src1;
11975 thumb_insn_r->reg_rec_count = 2;
11976
11977 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11978
11979 return 0;
11980 }
11981
11982
11983 /* Handling opcode 001 insns. */
11984
11985 static int
11986 thumb_record_add_sub_cmp_mov (insn_decode_record *thumb_insn_r)
11987 {
11988 uint32_t record_buf[8];
11989 uint32_t reg_src1 = 0;
11990
11991 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
11992
11993 record_buf[0] = ARM_PS_REGNUM;
11994 record_buf[1] = reg_src1;
11995 thumb_insn_r->reg_rec_count = 2;
11996
11997 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
11998
11999 return 0;
12000 }
12001
12002 /* Handling opcode 010 insns. */
12003
12004 static int
12005 thumb_record_ld_st_reg_offset (insn_decode_record *thumb_insn_r)
12006 {
12007 struct regcache *reg_cache = thumb_insn_r->regcache;
12008 uint32_t record_buf[8], record_buf_mem[8];
12009
12010 uint32_t reg_src1 = 0, reg_src2 = 0;
12011 uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
12012
12013 ULONGEST u_regval[2] = {0};
12014
12015 opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
12016
12017 if (bit (thumb_insn_r->arm_insn, 12))
12018 {
12019 /* Handle load/store register offset. */
12020 opcode2 = bits (thumb_insn_r->arm_insn, 9, 10);
12021 if (opcode2 >= 12 && opcode2 <= 15)
12022 {
12023 /* LDR(2), LDRB(2), LDRH(2), LDRSB, LDRSH. */
12024 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12025 record_buf[0] = reg_src1;
12026 thumb_insn_r->reg_rec_count = 1;
12027 }
12028 else if (opcode2 >= 8 && opcode2 <= 10)
12029 {
12030 /* STR(2), STRB(2), STRH(2). */
12031 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12032 reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
12033 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval[0]);
12034 regcache_raw_read_unsigned (reg_cache, reg_src2, &u_regval[1]);
12035 if (8 == opcode2)
12036 record_buf_mem[0] = 4; /* STR (2). */
12037 else if (10 == opcode2)
12038 record_buf_mem[0] = 1; /* STRB (2). */
12039 else if (9 == opcode2)
12040 record_buf_mem[0] = 2; /* STRH (2). */
12041 record_buf_mem[1] = u_regval[0] + u_regval[1];
12042 thumb_insn_r->mem_rec_count = 1;
12043 }
12044 }
12045 else if (bit (thumb_insn_r->arm_insn, 11))
12046 {
12047 /* Handle load from literal pool. */
12048 /* LDR(3). */
12049 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12050 record_buf[0] = reg_src1;
12051 thumb_insn_r->reg_rec_count = 1;
12052 }
12053 else if (opcode1)
12054 {
12055 opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
12056 opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
12057 if ((3 == opcode2) && (!opcode3))
12058 {
12059 /* Branch with exchange. */
12060 record_buf[0] = ARM_PS_REGNUM;
12061 thumb_insn_r->reg_rec_count = 1;
12062 }
12063 else
12064 {
12065 /* Format 8; special data processing insns. */
12066 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12067 record_buf[0] = ARM_PS_REGNUM;
12068 record_buf[1] = reg_src1;
12069 thumb_insn_r->reg_rec_count = 2;
12070 }
12071 }
12072 else
12073 {
12074 /* Format 5; data processing insns. */
12075 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12076 if (bit (thumb_insn_r->arm_insn, 7))
12077 {
12078 reg_src1 = reg_src1 + 8;
12079 }
12080 record_buf[0] = ARM_PS_REGNUM;
12081 record_buf[1] = reg_src1;
12082 thumb_insn_r->reg_rec_count = 2;
12083 }
12084
12085 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12086 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12087 record_buf_mem);
12088
12089 return 0;
12090 }
12091
12092 /* Handling opcode 001 insns. */
12093
12094 static int
12095 thumb_record_ld_st_imm_offset (insn_decode_record *thumb_insn_r)
12096 {
12097 struct regcache *reg_cache = thumb_insn_r->regcache;
12098 uint32_t record_buf[8], record_buf_mem[8];
12099
12100 uint32_t reg_src1 = 0;
12101 uint32_t opcode = 0, immed_5 = 0;
12102
12103 ULONGEST u_regval = 0;
12104
12105 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12106
12107 if (opcode)
12108 {
12109 /* LDR(1). */
12110 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12111 record_buf[0] = reg_src1;
12112 thumb_insn_r->reg_rec_count = 1;
12113 }
12114 else
12115 {
12116 /* STR(1). */
12117 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12118 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12119 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12120 record_buf_mem[0] = 4;
12121 record_buf_mem[1] = u_regval + (immed_5 * 4);
12122 thumb_insn_r->mem_rec_count = 1;
12123 }
12124
12125 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12126 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12127 record_buf_mem);
12128
12129 return 0;
12130 }
12131
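/* Worked illustration (hypothetical register contents): for the Thumb
   insn STR r0, [r1, #12], immed_5 (bits 6-10) is 3 and Rn (bits 3-5) is
   r1, so with R1 = 0x3000 the recorded 4-byte block starts at
   0x3000 + (3 * 4) = 0x300C.  */
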
12132 /* Handling opcode 100 insns. */
12133
12134 static int
12135 thumb_record_ld_st_stack (insn_decode_record *thumb_insn_r)
12136 {
12137 struct regcache *reg_cache = thumb_insn_r->regcache;
12138 uint32_t record_buf[8], record_buf_mem[8];
12139
12140 uint32_t reg_src1 = 0;
12141 uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
12142
12143 ULONGEST u_regval = 0;
12144
12145 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12146
12147 if (3 == opcode)
12148 {
12149 /* LDR(4). */
12150 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12151 record_buf[0] = reg_src1;
12152 thumb_insn_r->reg_rec_count = 1;
12153 }
12154 else if (1 == opcode)
12155 {
12156 /* LDRH(1). */
12157 reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
12158 record_buf[0] = reg_src1;
12159 thumb_insn_r->reg_rec_count = 1;
12160 }
12161 else if (2 == opcode)
12162 {
12163 /* STR(3). */
12164 immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
12165 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12166 record_buf_mem[0] = 4;
12167 record_buf_mem[1] = u_regval + (immed_8 * 4);
12168 thumb_insn_r->mem_rec_count = 1;
12169 }
12170 else if (0 == opcode)
12171 {
12172 /* STRH(1). */
12173 immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
12174 reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
12175 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12176 record_buf_mem[0] = 2;
12177 record_buf_mem[1] = u_regval + (immed_5 * 2);
12178 thumb_insn_r->mem_rec_count = 1;
12179 }
12180
12181 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12182 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12183 record_buf_mem);
12184
12185 return 0;
12186 }
12187
12188 /* Handling opcode 101 insns. */
12189
12190 static int
12191 thumb_record_misc (insn_decode_record *thumb_insn_r)
12192 {
12193 struct regcache *reg_cache = thumb_insn_r->regcache;
12194
12195 uint32_t opcode = 0, opcode1 = 0, opcode2 = 0;
12196 uint32_t register_bits = 0, register_count = 0;
12197 uint32_t register_list[8] = {0}, index = 0, start_address = 0;
12198 uint32_t record_buf[24], record_buf_mem[48];
12199 uint32_t reg_src1;
12200
12201 ULONGEST u_regval = 0;
12202
12203 opcode = bits (thumb_insn_r->arm_insn, 11, 12);
12204 opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
12205 opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);
12206
12207 if (14 == opcode2)
12208 {
12209 /* POP. */
12210 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12211 while (register_bits)
12212 {
12213 if (register_bits & 0x00000001)
12214 register_list[register_count++] = 1;
12215 register_bits = register_bits >> 1;
12216 }
12217 record_buf[register_count] = ARM_PS_REGNUM;
12218 record_buf[register_count + 1] = ARM_SP_REGNUM;
12219 thumb_insn_r->reg_rec_count = register_count + 2;
12220 for (register_count = 0; register_count < 8; register_count++)
12221 {
12222 if (register_list[register_count])
12223 {
12224 record_buf[index] = register_count;
12225 index++;
12226 }
12227 }
12228 }
12229 else if (10 == opcode2)
12230 {
12231 /* PUSH. */
12232 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12233 regcache_raw_read_unsigned (reg_cache, ARM_SP_REGNUM, &u_regval);
12234 while (register_bits)
12235 {
12236 if (register_bits & 0x00000001)
12237 register_count++;
12238 register_bits = register_bits >> 1;
12239 }
12240 start_address = u_regval - \
12241 (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count));
12242 thumb_insn_r->mem_rec_count = register_count;
12243 while (register_count)
12244 {
12245 record_buf_mem[(register_count * 2) - 1] = start_address;
12246 record_buf_mem[(register_count * 2) - 2] = 4;
12247 start_address = start_address + 4;
12248 register_count--;
12249 }
12250 record_buf[0] = ARM_SP_REGNUM;
12251 thumb_insn_r->reg_rec_count = 1;
12252 }
12253 else if (0x1E == opcode1)
12254 {
12255 /* BKPT insn. */
12256 /* Handle enhanced software breakpoint insn, BKPT. */
12257 /* CPSR is changed so that execution continues in ARM state, with normal
12258 interrupts disabled, entering abort mode. */
12259 /* PC is set according to the high vector configuration. */
12260 /* If the user hits a breakpoint and types reverse, we need to go back with
12261 the previous CPSR and Program Counter. */
12262 record_buf[0] = ARM_PS_REGNUM;
12263 record_buf[1] = ARM_LR_REGNUM;
12264 thumb_insn_r->reg_rec_count = 2;
12265 /* We need to save SPSR value, which is not yet done. */
12266 printf_unfiltered (_("Process record does not support instruction "
12267 "0x%0x at address %s.\n"),
12268 thumb_insn_r->arm_insn,
12269 paddress (thumb_insn_r->gdbarch,
12270 thumb_insn_r->this_addr));
12271 return -1;
12272 }
12273 else if ((0 == opcode) || (1 == opcode))
12274 {
12275 /* ADD(5), ADD(6). */
12276 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12277 record_buf[0] = reg_src1;
12278 thumb_insn_r->reg_rec_count = 1;
12279 }
12280 else if (2 == opcode)
12281 {
12282 /* ADD(7), SUB(4). */
12283 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12284 record_buf[0] = ARM_SP_REGNUM;
12285 thumb_insn_r->reg_rec_count = 1;
12286 }
12287
12288 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12289 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12290 record_buf_mem);
12291
12292 return 0;
12293 }
12294
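/* Worked illustration: for the Thumb insn POP {r0, r1}, register_bits
   has bits 0 and 1 set, so the loops above leave record_buf as
   { 0, 1, ARM_PS_REGNUM, ARM_SP_REGNUM } with reg_rec_count = 4: the
   two popped registers, the status register and the updated stack
   pointer.  */
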
12295 /* Handling opcode 110 insns. */
12296
12297 static int
12298 thumb_record_ldm_stm_swi (insn_decode_record *thumb_insn_r)
12299 {
12300 struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
12301 struct regcache *reg_cache = thumb_insn_r->regcache;
12302
12303 uint32_t ret = 0; /* function return value: -1:record failure ; 0:success */
12304 uint32_t reg_src1 = 0;
12305 uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
12306 uint32_t register_list[8] = {0}, index = 0, start_address = 0;
12307 uint32_t record_buf[24], record_buf_mem[48];
12308
12309 ULONGEST u_regval = 0;
12310
12311 opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
12312 opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);
12313
12314 if (1 == opcode2)
12315 {
12316
12317 /* LDMIA. */
12318 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12319 /* Get Rn. */
12320 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12321 while (register_bits)
12322 {
12323 if (register_bits & 0x00000001)
12324 register_list[register_count++] = 1;
12325 register_bits = register_bits >> 1;
12326 }
12327 record_buf[register_count] = reg_src1;
12328 thumb_insn_r->reg_rec_count = register_count + 1;
12329 for (register_count = 0; register_count < 8; register_count++)
12330 {
12331 if (register_list[register_count])
12332 {
12333 record_buf[index] = register_count;
12334 index++;
12335 }
12336 }
12337 }
12338 else if (0 == opcode2)
12339 {
12340 /* It handles STMIA. */
12341 register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
12342 /* Get Rn. */
12343 reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
12344 regcache_raw_read_unsigned (reg_cache, reg_src1, &u_regval);
12345 while (register_bits)
12346 {
12347 if (register_bits & 0x00000001)
12348 register_count++;
12349 register_bits = register_bits >> 1;
12350 }
12351 start_address = u_regval;
12352 thumb_insn_r->mem_rec_count = register_count;
12353 while (register_count)
12354 {
12355 record_buf_mem[(register_count * 2) - 1] = start_address;
12356 record_buf_mem[(register_count * 2) - 2] = 4;
12357 start_address = start_address + 4;
12358 register_count--;
12359 }
12360 }
12361 else if (0x1F == opcode1)
12362 {
12363 /* Handle arm syscall insn. */
12364 if (tdep->arm_swi_record != NULL)
12365 {
12366 ret = tdep->arm_swi_record(reg_cache);
12367 }
12368 else
12369 {
12370 printf_unfiltered (_("no syscall record support\n"));
12371 return -1;
12372 }
12373 }
12374
12375 /* B (1), the conditional branch, is automatically taken care of in
12376 process_record, as PC is saved there. */
12377
12378 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12379 MEM_ALLOC (thumb_insn_r->arm_mems, thumb_insn_r->mem_rec_count,
12380 record_buf_mem);
12381
12382 return ret;
12383 }
12384
12385 /* Handling opcode 111 insns. */
12386
12387 static int
12388 thumb_record_branch (insn_decode_record *thumb_insn_r)
12389 {
12390 uint32_t record_buf[8];
12391 uint32_t bits_h = 0;
12392
12393 bits_h = bits (thumb_insn_r->arm_insn, 11, 12);
12394
12395 if (2 == bits_h || 3 == bits_h)
12396 {
12397 /* BL */
12398 record_buf[0] = ARM_LR_REGNUM;
12399 thumb_insn_r->reg_rec_count = 1;
12400 }
12401 else if (1 == bits_h)
12402 {
12403 /* BLX(1). */
12404 record_buf[0] = ARM_PS_REGNUM;
12405 record_buf[1] = ARM_LR_REGNUM;
12406 thumb_insn_r->reg_rec_count = 2;
12407 }
12408
12409 /* B(2) is automatically taken care of in process_record, as PC is
12410 saved there. */
12411
12412 REG_ALLOC (thumb_insn_r->arm_regs, thumb_insn_r->reg_rec_count, record_buf);
12413
12414 return 0;
12415 }
12416
12417
12418 /* Extracts an arm/thumb/thumb2 insn depending on the size, and returns 0 on success
12419 and a positive value on failure. */
12420
12421 static int
12422 extract_arm_insn (insn_decode_record *insn_record, uint32_t insn_size)
12423 {
12424 gdb_byte buf[insn_size];
12425
12426 memset (&buf[0], 0, insn_size);
12427
12428 if (target_read_memory (insn_record->this_addr, &buf[0], insn_size))
12429 return 1;
12430 insn_record->arm_insn = (uint32_t) extract_unsigned_integer (&buf[0],
12431 insn_size,
12432 gdbarch_byte_order (insn_record->gdbarch));
12433 return 0;
12434 }
12435
12436 typedef int (*sti_arm_hdl_fp_t) (insn_decode_record*);
12437
12438 /* Decode an arm/thumb insn depending on its condition codes and opcodes, and
12439 dispatch it. */
12440
12441 static int
12442 decode_insn (insn_decode_record *arm_record, record_type_t record_type,
12443 uint32_t insn_size)
12444 {
12445
12446 /* (Starting from numerical 0), bits 25, 26, 27 decode the type of arm instruction. */
12447 static const sti_arm_hdl_fp_t arm_handle_insn[8] =
12448 {
12449 arm_record_data_proc_misc_ld_str, /* 000. */
12450 arm_record_data_proc_imm, /* 001. */
12451 arm_record_ld_st_imm_offset, /* 010. */
12452 arm_record_ld_st_reg_offset, /* 011. */
12453 arm_record_ld_st_multiple, /* 100. */
12454 arm_record_b_bl, /* 101. */
12455 arm_record_coproc, /* 110. */
12456 arm_record_coproc_data_proc /* 111. */
12457 };
12458
12459 /* (Starting from numerical 0), bits 13, 14, 15 decode the type of thumb instruction. */
12460 static const sti_arm_hdl_fp_t thumb_handle_insn[8] =
12461 {
12462 thumb_record_shift_add_sub, /* 000. */
12463 thumb_record_add_sub_cmp_mov, /* 001. */
12464 thumb_record_ld_st_reg_offset, /* 010. */
12465 thumb_record_ld_st_imm_offset, /* 011. */
12466 thumb_record_ld_st_stack, /* 100. */
12467 thumb_record_misc, /* 101. */
12468 thumb_record_ldm_stm_swi, /* 110. */
12469 thumb_record_branch /* 111. */
12470 };
12471
12472 uint32_t ret = 0; /* return value: negative:failure 0:success. */
12473 uint32_t insn_id = 0;
12474
12475 if (extract_arm_insn (arm_record, insn_size))
12476 {
12477 if (record_debug)
12478 {
12479 printf_unfiltered (_("Process record: error reading memory at "
12480 "addr %s len = %d.\n"),
12481 paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
12482 }
12483 return -1;
12484 }
12485 else if (ARM_RECORD == record_type)
12486 {
12487 arm_record->cond = bits (arm_record->arm_insn, 28, 31);
12488 insn_id = bits (arm_record->arm_insn, 25, 27);
12489 ret = arm_record_extension_space (arm_record);
12490 /* If this insn has fallen into extension space
12491 then we need not decode it anymore. */
12492 if (ret != -1 && !INSN_RECORDED(arm_record))
12493 {
12494 ret = arm_handle_insn[insn_id] (arm_record);
12495 }
12496 }
12497 else if (THUMB_RECORD == record_type)
12498 {
12499 /* As thumb does not have condition codes, we set this to a negative value. */
12500 arm_record->cond = -1;
12501 insn_id = bits (arm_record->arm_insn, 13, 15);
12502 ret = thumb_handle_insn[insn_id] (arm_record);
12503 }
12504 else if (THUMB2_RECORD == record_type)
12505 {
12506 printf_unfiltered (_("Process record doesn't support thumb32 instruction "
12507 "0x%0x at address %s.\n"), arm_record->arm_insn,
12508 paddress (arm_record->gdbarch,
12509 arm_record->this_addr));
12510 ret = -1;
12511 }
12512 else
12513 {
12514 /* Throw assertion. */
12515 gdb_assert_not_reached ("not a valid instruction, could not decode");
12516 }
12517
12518 return ret;
12519 }
12520
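/* Dispatch illustration (hypothetical instruction): for the ARM insn
   0xE59F1004 (LDR r1, [pc, #4]), bits 25-27 are 010, so decode_insn
   above dispatches to arm_record_ld_st_imm_offset; for a 16-bit Thumb
   insn the index is taken from bits 13-15 instead.  */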
12521
12522 /* Cleans up local record registers and memory allocations. */
12523
12524 static void
12525 deallocate_reg_mem (insn_decode_record *record)
12526 {
12527 xfree (record->arm_regs);
12528 xfree (record->arm_mems);
12529 }
12530
12531
12532 /* Parse the current instruction and record the values of the registers and
12533 memory that will be changed by the current instruction to "record_arch_list".
12534 Return -1 if something is wrong. */
12535
12536 int
12537 arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
12538 CORE_ADDR insn_addr)
12539 {
12540
12541 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
12542 uint32_t no_of_rec = 0;
12543 uint32_t ret = 0; /* return value: -1:record failure ; 0:success */
12544 ULONGEST t_bit = 0, insn_id = 0;
12545
12546 ULONGEST u_regval = 0;
12547
12548 insn_decode_record arm_record;
12549
12550 memset (&arm_record, 0, sizeof (insn_decode_record));
12551 arm_record.regcache = regcache;
12552 arm_record.this_addr = insn_addr;
12553 arm_record.gdbarch = gdbarch;
12554
12555
12556 if (record_debug > 1)
12557 {
12558 fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
12559 "addr = %s\n",
12560 paddress (gdbarch, arm_record.this_addr));
12561 }
12562
12563 if (extract_arm_insn (&arm_record, 2))
12564 {
12565 if (record_debug)
12566 {
12567 printf_unfiltered (_("Process record: error reading memory at "
12568 "addr %s len = %d.\n"),
12569 paddress (arm_record.gdbarch,
12570 arm_record.this_addr), 2);
12571 }
12572 return -1;
12573 }
12574
12575 /* Check whether the insn is a thumb or an arm one. */
12576
12577 t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
12578 regcache_raw_read_unsigned (arm_record.regcache, ARM_PS_REGNUM, &u_regval);
12579
12580
12581 if (!(u_regval & t_bit))
12582 {
12583 /* We are decoding arm insn. */
12584 ret = decode_insn (&arm_record, ARM_RECORD, ARM_INSN_SIZE_BYTES);
12585 }
12586 else
12587 {
12588 insn_id = bits (arm_record.arm_insn, 11, 15);
12589 /* Is it a thumb2 insn? */
12590 if ((0x1D == insn_id) || (0x1E == insn_id) || (0x1F == insn_id))
12591 {
12592 ret = decode_insn (&arm_record, THUMB2_RECORD,
12593 THUMB2_INSN_SIZE_BYTES);
12594 }
12595 else
12596 {
12597 /* We are decoding thumb insn. */
12598 ret = decode_insn (&arm_record, THUMB_RECORD, THUMB_INSN_SIZE_BYTES);
12599 }
12600 }
12601
12602 if (0 == ret)
12603 {
12604 /* Record registers. */
12605 record_full_arch_list_add_reg (arm_record.regcache, ARM_PC_REGNUM);
12606 if (arm_record.arm_regs)
12607 {
12608 for (no_of_rec = 0; no_of_rec < arm_record.reg_rec_count; no_of_rec++)
12609 {
12610 if (record_full_arch_list_add_reg
12611 (arm_record.regcache , arm_record.arm_regs[no_of_rec]))
12612 ret = -1;
12613 }
12614 }
12615 /* Record memories. */
12616 if (arm_record.arm_mems)
12617 {
12618 for (no_of_rec = 0; no_of_rec < arm_record.mem_rec_count; no_of_rec++)
12619 {
12620 if (record_full_arch_list_add_mem
12621 ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
12622 arm_record.arm_mems[no_of_rec].len))
12623 ret = -1;
12624 }
12625 }
12626
12627 if (record_full_arch_list_add_end ())
12628 ret = -1;
12629 }
12630
12631
12632 deallocate_reg_mem (&arm_record);
12633
12634 return ret;
12635 }
12636
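/* Usage sketch (illustration only): a target's gdbarch initialization
   routine is expected to install the hook roughly as below; the
   surrounding initialization function is not shown here.  */
#if 0
  set_gdbarch_process_record (gdbarch, arm_process_record);
#endif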