gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46
47 static void target_info (char *, int);
48
49 static void kill_or_be_killed (int);
50
51 static void default_terminal_info (char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
57
58 static int nosymbol (char *, CORE_ADDR *);
59
60 static void tcomplain (void) ATTR_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static void nosupport_runtime (void);
77
78 static LONGEST default_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST current_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex, gdb_byte *readbuf,
87 const gdb_byte *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static LONGEST target_xfer_partial (struct target_ops *ops,
91 enum target_object object,
92 const char *annex,
93 void *readbuf, const void *writebuf,
94 ULONGEST offset, LONGEST len);
95
96 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
97 ptid_t ptid);
98
99 static void init_dummy_target (void);
100
101 static struct target_ops debug_target;
102
103 static void debug_to_open (char *, int);
104
105 static void debug_to_prepare_to_store (struct regcache *);
106
107 static void debug_to_files_info (struct target_ops *);
108
109 static int debug_to_insert_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_remove_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_can_use_hw_breakpoint (int, int, int);
116
117 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
121 struct bp_target_info *);
122
123 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
124
125 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
126
127 static int debug_to_stopped_by_watchpoint (void);
128
129 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
130
131 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
132 CORE_ADDR, CORE_ADDR, int);
133
134 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
151
152 static int debug_to_can_run (void);
153
154 static void debug_to_notice_signals (ptid_t);
155
156 static void debug_to_stop (ptid_t);
157
158 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
159 weird and mysterious ways. Putting the variable here lets those
160 weird and mysterious ways keep building while they are being
161 converted to the inferior inheritance structure. */
162 struct target_ops deprecated_child_ops;
163
164 /* Pointer to array of target architecture structures; the size of the
165 array; the current index into the array; the allocated size of the
166 array. */
167 struct target_ops **target_structs;
168 unsigned target_struct_size;
169 unsigned target_struct_index;
170 unsigned target_struct_allocsize;
171 #define DEFAULT_ALLOCSIZE 10
172
173 /* The initial current target, so that there is always a semi-valid
174 current target. */
175
176 static struct target_ops dummy_target;
177
178 /* Top of target stack. */
179
180 static struct target_ops *target_stack;
181
182 /* The target structure we are currently using to talk to a process
183 or file or whatever "inferior" we have. */
184
185 struct target_ops current_target;
186
187 /* Command list for target. */
188
189 static struct cmd_list_element *targetlist = NULL;
190
191 /* Nonzero if we should trust readonly sections from the
192 executable when reading memory. */
193
194 static int trust_readonly = 0;
195
196 /* Nonzero if we should show true memory content including
197 memory breakpoints inserted by GDB. */
198
199 static int show_memory_breakpoints = 0;
200
201 /* Non-zero if we want to see trace of target level stuff. */
202
203 static int targetdebug = 0;
204 static void
205 show_targetdebug (struct ui_file *file, int from_tty,
206 struct cmd_list_element *c, const char *value)
207 {
208 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
209 }
210
211 static void setup_target_debug (void);
212
213 /* The option sets this. */
214 static int stack_cache_enabled_p_1 = 1;
215 /* And set_stack_cache_enabled_p updates this.
216 The reason for the separation is so that we don't flush the cache for
217 on->on transitions. */
218 static int stack_cache_enabled_p = 1;
219
220 /* This is called *after* the stack-cache has been set.
221 Flush the cache for off->on and on->off transitions.
222 There's no real need to flush the cache for on->off transitions,
223 except cleanliness. */
224
225 static void
226 set_stack_cache_enabled_p (char *args, int from_tty,
227 struct cmd_list_element *c)
228 {
229 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
230 target_dcache_invalidate ();
231
232 stack_cache_enabled_p = stack_cache_enabled_p_1;
233 }
234
235 static void
236 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
237 struct cmd_list_element *c, const char *value)
238 {
239 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
240 }
241
242 /* Cache of memory operations, to speed up remote access. */
243 static DCACHE *target_dcache;
244
245 /* Invalidate the target dcache. */
246
247 void
248 target_dcache_invalidate (void)
249 {
250 dcache_invalidate (target_dcache);
251 }
252
253 /* The user just typed 'target' without the name of a target. */
254
255 static void
256 target_command (char *arg, int from_tty)
257 {
258 fputs_filtered ("Argument required (target name). Try `help target'\n",
259 gdb_stdout);
260 }
261
262 /* Default target_has_* methods for process_stratum targets. */
263
264 int
265 default_child_has_all_memory (struct target_ops *ops)
266 {
267 /* If no inferior selected, then we can't read memory here. */
268 if (ptid_equal (inferior_ptid, null_ptid))
269 return 0;
270
271 return 1;
272 }
273
274 int
275 default_child_has_memory (struct target_ops *ops)
276 {
277 /* If no inferior selected, then we can't read memory here. */
278 if (ptid_equal (inferior_ptid, null_ptid))
279 return 0;
280
281 return 1;
282 }
283
284 int
285 default_child_has_stack (struct target_ops *ops)
286 {
287 /* If no inferior selected, there's no stack. */
288 if (ptid_equal (inferior_ptid, null_ptid))
289 return 0;
290
291 return 1;
292 }
293
294 int
295 default_child_has_registers (struct target_ops *ops)
296 {
297 /* Can't read registers from no inferior. */
298 if (ptid_equal (inferior_ptid, null_ptid))
299 return 0;
300
301 return 1;
302 }
303
304 int
305 default_child_has_execution (struct target_ops *ops)
306 {
307 /* If there's no thread selected, then we can't make it run through
308 hoops. */
309 if (ptid_equal (inferior_ptid, null_ptid))
310 return 0;
311
312 return 1;
313 }
314
315
316 int
317 target_has_all_memory_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_all_memory (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_memory_1 (void)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_memory (t))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_stack_1 (void)
342 {
343 struct target_ops *t;
344
345 for (t = current_target.beneath; t != NULL; t = t->beneath)
346 if (t->to_has_stack (t))
347 return 1;
348
349 return 0;
350 }
351
352 int
353 target_has_registers_1 (void)
354 {
355 struct target_ops *t;
356
357 for (t = current_target.beneath; t != NULL; t = t->beneath)
358 if (t->to_has_registers (t))
359 return 1;
360
361 return 0;
362 }
363
364 int
365 target_has_execution_1 (void)
366 {
367 struct target_ops *t;
368
369 for (t = current_target.beneath; t != NULL; t = t->beneath)
370 if (t->to_has_execution (t))
371 return 1;
372
373 return 0;
374 }
375
376 /* Add a possible target architecture to the list. */
377
378 void
379 add_target (struct target_ops *t)
380 {
381 /* Provide default values for all "must have" methods. */
382 if (t->to_xfer_partial == NULL)
383 t->to_xfer_partial = default_xfer_partial;
384
385 if (t->to_has_all_memory == NULL)
386 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
387
388 if (t->to_has_memory == NULL)
389 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
390
391 if (t->to_has_stack == NULL)
392 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
393
394 if (t->to_has_registers == NULL)
395 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
396
397 if (t->to_has_execution == NULL)
398 t->to_has_execution = (int (*) (struct target_ops *)) return_zero;
399
400 if (!target_structs)
401 {
402 target_struct_allocsize = DEFAULT_ALLOCSIZE;
403 target_structs = (struct target_ops **) xmalloc
404 (target_struct_allocsize * sizeof (*target_structs));
405 }
406 if (target_struct_size >= target_struct_allocsize)
407 {
408 target_struct_allocsize *= 2;
409 target_structs = (struct target_ops **)
410 xrealloc ((char *) target_structs,
411 target_struct_allocsize * sizeof (*target_structs));
412 }
413 target_structs[target_struct_size++] = t;
414
415 if (targetlist == NULL)
416 add_prefix_cmd ("target", class_run, target_command, _("\
417 Connect to a target machine or process.\n\
418 The first argument is the type or protocol of the target machine.\n\
419 Remaining arguments are interpreted by the target protocol. For more\n\
420 information on the arguments for a particular protocol, type\n\
421 `help target ' followed by the protocol name."),
422 &targetlist, "target ", 0, &cmdlist);
423 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
424 }
425
426 /* Stub functions */
427
428 void
429 target_ignore (void)
430 {
431 }
432
433 void
434 target_kill (void)
435 {
436 struct target_ops *t;
437
438 for (t = current_target.beneath; t != NULL; t = t->beneath)
439 if (t->to_kill != NULL)
440 {
441 if (targetdebug)
442 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
443
444 t->to_kill (t);
445 return;
446 }
447
448 noprocess ();
449 }
450
451 void
452 target_load (char *arg, int from_tty)
453 {
454 target_dcache_invalidate ();
455 (*current_target.to_load) (arg, from_tty);
456 }
457
458 void
459 target_create_inferior (char *exec_file, char *args,
460 char **env, int from_tty)
461 {
462 struct target_ops *t;
463 for (t = current_target.beneath; t != NULL; t = t->beneath)
464 {
465 if (t->to_create_inferior != NULL)
466 {
467 t->to_create_inferior (t, exec_file, args, env, from_tty);
468 if (targetdebug)
469 fprintf_unfiltered (gdb_stdlog,
470 "target_create_inferior (%s, %s, xxx, %d)\n",
471 exec_file, args, from_tty);
472 return;
473 }
474 }
475
476 internal_error (__FILE__, __LINE__,
477 "could not find a target to create inferior");
478 }
479
480 void
481 target_terminal_inferior (void)
482 {
483 /* A background resume (``run&'') should leave GDB in control of the
484 terminal. */
485 if (target_is_async_p () && !sync_execution)
486 return;
487
488 /* If GDB is resuming the inferior in the foreground, install
489 inferior's terminal modes. */
490 (*current_target.to_terminal_inferior) ();
491 }
492
493 static int
494 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
495 struct target_ops *t)
496 {
497 errno = EIO; /* Can't read/write this location */
498 return 0; /* No bytes handled */
499 }
500
501 static void
502 tcomplain (void)
503 {
504 error (_("You can't do that when your target is `%s'"),
505 current_target.to_shortname);
506 }
507
508 void
509 noprocess (void)
510 {
511 error (_("You can't do that without a process to debug."));
512 }
513
514 static int
515 nosymbol (char *name, CORE_ADDR *addrp)
516 {
517 return 1; /* Symbol does not exist in target env */
518 }
519
520 static void
521 nosupport_runtime (void)
522 {
523 if (ptid_equal (inferior_ptid, null_ptid))
524 noprocess ();
525 else
526 error (_("No run-time support for this"));
527 }
528
529
530 static void
531 default_terminal_info (char *args, int from_tty)
532 {
533 printf_unfiltered (_("No saved terminal information.\n"));
534 }
535
536 /* This is the default target_create_inferior and target_attach function.
537 If the current target is executing, it asks whether to kill it off.
538 If this function returns without calling error(), it has killed off
539 the target, and the operation should be attempted. */
540
541 static void
542 kill_or_be_killed (int from_tty)
543 {
544 if (target_has_execution)
545 {
546 printf_unfiltered (_("You are already running a program:\n"));
547 target_files_info ();
548 if (query (_("Kill it? ")))
549 {
550 target_kill ();
551 if (target_has_execution)
552 error (_("Killing the program did not help."));
553 return;
554 }
555 else
556 {
557 error (_("Program not killed."));
558 }
559 }
560 tcomplain ();
561 }
562
563 /* A default implementation for the to_get_ada_task_ptid target method.
564
565 This function builds the PTID by using both LWP and TID as part of
566 the PTID lwp and tid elements. The pid used is the pid of the
567 inferior_ptid. */
568
569 static ptid_t
570 default_get_ada_task_ptid (long lwp, long tid)
571 {
572 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
573 }
574
575 /* Go through the target stack from top to bottom, copying over zero
576 entries in current_target, then filling in still empty entries. In
577 effect, we are doing class inheritance through the pushed target
578 vectors.
579
580 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
581 is currently implemented, is that it discards any knowledge of
582 which target an inherited method originally belonged to.
583 Consequently, new target methods should instead explicitly and
584 locally search the target stack for the target that can handle the
585 request. */
586
587 static void
588 update_current_target (void)
589 {
590 struct target_ops *t;
591
592 /* First, reset current's contents. */
593 memset (&current_target, 0, sizeof (current_target));
594
595 #define INHERIT(FIELD, TARGET) \
596 if (!current_target.FIELD) \
597 current_target.FIELD = (TARGET)->FIELD
598
599 for (t = target_stack; t; t = t->beneath)
600 {
601 INHERIT (to_shortname, t);
602 INHERIT (to_longname, t);
603 INHERIT (to_doc, t);
604 /* Do not inherit to_open. */
605 /* Do not inherit to_close. */
606 /* Do not inherit to_attach. */
607 INHERIT (to_post_attach, t);
608 INHERIT (to_attach_no_wait, t);
609 /* Do not inherit to_detach. */
610 /* Do not inherit to_disconnect. */
611 /* Do not inherit to_resume. */
612 /* Do not inherit to_wait. */
613 /* Do not inherit to_fetch_registers. */
614 /* Do not inherit to_store_registers. */
615 INHERIT (to_prepare_to_store, t);
616 INHERIT (deprecated_xfer_memory, t);
617 INHERIT (to_files_info, t);
618 INHERIT (to_insert_breakpoint, t);
619 INHERIT (to_remove_breakpoint, t);
620 INHERIT (to_can_use_hw_breakpoint, t);
621 INHERIT (to_insert_hw_breakpoint, t);
622 INHERIT (to_remove_hw_breakpoint, t);
623 INHERIT (to_insert_watchpoint, t);
624 INHERIT (to_remove_watchpoint, t);
625 INHERIT (to_stopped_data_address, t);
626 INHERIT (to_have_steppable_watchpoint, t);
627 INHERIT (to_have_continuable_watchpoint, t);
628 INHERIT (to_stopped_by_watchpoint, t);
629 INHERIT (to_watchpoint_addr_within_range, t);
630 INHERIT (to_region_ok_for_hw_watchpoint, t);
631 INHERIT (to_terminal_init, t);
632 INHERIT (to_terminal_inferior, t);
633 INHERIT (to_terminal_ours_for_output, t);
634 INHERIT (to_terminal_ours, t);
635 INHERIT (to_terminal_save_ours, t);
636 INHERIT (to_terminal_info, t);
637 /* Do not inherit to_kill. */
638 INHERIT (to_load, t);
639 INHERIT (to_lookup_symbol, t);
640 /* Do not inherit to_create_inferior. */
641 INHERIT (to_post_startup_inferior, t);
642 INHERIT (to_acknowledge_created_inferior, t);
643 INHERIT (to_insert_fork_catchpoint, t);
644 INHERIT (to_remove_fork_catchpoint, t);
645 INHERIT (to_insert_vfork_catchpoint, t);
646 INHERIT (to_remove_vfork_catchpoint, t);
647 /* Do not inherit to_follow_fork. */
648 INHERIT (to_insert_exec_catchpoint, t);
649 INHERIT (to_remove_exec_catchpoint, t);
650 INHERIT (to_has_exited, t);
651 /* Do not inherit to_mourn_inferior. */
652 INHERIT (to_can_run, t);
653 INHERIT (to_notice_signals, t);
654 /* Do not inherit to_thread_alive. */
655 /* Do not inherit to_find_new_threads. */
656 /* Do not inherit to_pid_to_str. */
657 INHERIT (to_extra_thread_info, t);
658 INHERIT (to_stop, t);
659 /* Do not inherit to_xfer_partial. */
660 INHERIT (to_rcmd, t);
661 INHERIT (to_pid_to_exec_file, t);
662 INHERIT (to_log_command, t);
663 INHERIT (to_stratum, t);
664 /* Do not inherit to_has_all_memory */
665 /* Do not inherit to_has_memory */
666 /* Do not inherit to_has_stack */
667 /* Do not inherit to_has_registers */
668 /* Do not inherit to_has_execution */
669 INHERIT (to_has_thread_control, t);
670 INHERIT (to_can_async_p, t);
671 INHERIT (to_is_async_p, t);
672 INHERIT (to_async, t);
673 INHERIT (to_async_mask, t);
674 INHERIT (to_find_memory_regions, t);
675 INHERIT (to_make_corefile_notes, t);
676 /* Do not inherit to_get_thread_local_address. */
677 INHERIT (to_can_execute_reverse, t);
678 INHERIT (to_thread_architecture, t);
679 /* Do not inherit to_read_description. */
680 INHERIT (to_get_ada_task_ptid, t);
681 /* Do not inherit to_search_memory. */
682 INHERIT (to_supports_multi_process, t);
683 INHERIT (to_magic, t);
684 /* Do not inherit to_memory_map. */
685 /* Do not inherit to_flash_erase. */
686 /* Do not inherit to_flash_done. */
687 }
688 #undef INHERIT
689
690 /* Clean up a target struct so it no longer has any zero pointers in
691 it. Some entries are defaulted to a method that prints an error,
692 others are hard-wired to a standard recursive default. */
693
694 #define de_fault(field, value) \
695 if (!current_target.field) \
696 current_target.field = value
697
698 de_fault (to_open,
699 (void (*) (char *, int))
700 tcomplain);
701 de_fault (to_close,
702 (void (*) (int))
703 target_ignore);
704 de_fault (to_post_attach,
705 (void (*) (int))
706 target_ignore);
707 de_fault (to_prepare_to_store,
708 (void (*) (struct regcache *))
709 noprocess);
710 de_fault (deprecated_xfer_memory,
711 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
712 nomemory);
713 de_fault (to_files_info,
714 (void (*) (struct target_ops *))
715 target_ignore);
716 de_fault (to_insert_breakpoint,
717 memory_insert_breakpoint);
718 de_fault (to_remove_breakpoint,
719 memory_remove_breakpoint);
720 de_fault (to_can_use_hw_breakpoint,
721 (int (*) (int, int, int))
722 return_zero);
723 de_fault (to_insert_hw_breakpoint,
724 (int (*) (struct gdbarch *, struct bp_target_info *))
725 return_minus_one);
726 de_fault (to_remove_hw_breakpoint,
727 (int (*) (struct gdbarch *, struct bp_target_info *))
728 return_minus_one);
729 de_fault (to_insert_watchpoint,
730 (int (*) (CORE_ADDR, int, int))
731 return_minus_one);
732 de_fault (to_remove_watchpoint,
733 (int (*) (CORE_ADDR, int, int))
734 return_minus_one);
735 de_fault (to_stopped_by_watchpoint,
736 (int (*) (void))
737 return_zero);
738 de_fault (to_stopped_data_address,
739 (int (*) (struct target_ops *, CORE_ADDR *))
740 return_zero);
741 de_fault (to_watchpoint_addr_within_range,
742 default_watchpoint_addr_within_range);
743 de_fault (to_region_ok_for_hw_watchpoint,
744 default_region_ok_for_hw_watchpoint);
745 de_fault (to_terminal_init,
746 (void (*) (void))
747 target_ignore);
748 de_fault (to_terminal_inferior,
749 (void (*) (void))
750 target_ignore);
751 de_fault (to_terminal_ours_for_output,
752 (void (*) (void))
753 target_ignore);
754 de_fault (to_terminal_ours,
755 (void (*) (void))
756 target_ignore);
757 de_fault (to_terminal_save_ours,
758 (void (*) (void))
759 target_ignore);
760 de_fault (to_terminal_info,
761 default_terminal_info);
762 de_fault (to_load,
763 (void (*) (char *, int))
764 tcomplain);
765 de_fault (to_lookup_symbol,
766 (int (*) (char *, CORE_ADDR *))
767 nosymbol);
768 de_fault (to_post_startup_inferior,
769 (void (*) (ptid_t))
770 target_ignore);
771 de_fault (to_acknowledge_created_inferior,
772 (void (*) (int))
773 target_ignore);
774 de_fault (to_insert_fork_catchpoint,
775 (void (*) (int))
776 tcomplain);
777 de_fault (to_remove_fork_catchpoint,
778 (int (*) (int))
779 tcomplain);
780 de_fault (to_insert_vfork_catchpoint,
781 (void (*) (int))
782 tcomplain);
783 de_fault (to_remove_vfork_catchpoint,
784 (int (*) (int))
785 tcomplain);
786 de_fault (to_insert_exec_catchpoint,
787 (void (*) (int))
788 tcomplain);
789 de_fault (to_remove_exec_catchpoint,
790 (int (*) (int))
791 tcomplain);
792 de_fault (to_has_exited,
793 (int (*) (int, int, int *))
794 return_zero);
795 de_fault (to_can_run,
796 return_zero);
797 de_fault (to_notice_signals,
798 (void (*) (ptid_t))
799 target_ignore);
800 de_fault (to_extra_thread_info,
801 (char *(*) (struct thread_info *))
802 return_zero);
803 de_fault (to_stop,
804 (void (*) (ptid_t))
805 target_ignore);
806 current_target.to_xfer_partial = current_xfer_partial;
807 de_fault (to_rcmd,
808 (void (*) (char *, struct ui_file *))
809 tcomplain);
810 de_fault (to_pid_to_exec_file,
811 (char *(*) (int))
812 return_zero);
813 de_fault (to_async,
814 (void (*) (void (*) (enum inferior_event_type, void*), void*))
815 tcomplain);
816 de_fault (to_async_mask,
817 (int (*) (int))
818 return_one);
819 de_fault (to_thread_architecture,
820 default_thread_architecture);
821 current_target.to_read_description = NULL;
822 de_fault (to_get_ada_task_ptid,
823 (ptid_t (*) (long, long))
824 default_get_ada_task_ptid);
825 de_fault (to_supports_multi_process,
826 (int (*) (void))
827 return_zero);
828 #undef de_fault
829
830 /* Finally, position the target-stack beneath the squashed
831 "current_target". That way code looking for a non-inherited
832 target method can quickly and simply find it. */
833 current_target.beneath = target_stack;
834
835 if (targetdebug)
836 setup_target_debug ();
837 }
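/* Illustrative sketch (not part of the original file): the "search the
   target stack locally" pattern recommended in the NOTE above
   update_current_target, mirroring target_kill above.  The method name
   to_frobnicate is hypothetical.  */
#if 0
void
target_frobnicate (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_frobnicate != NULL)
      {
        t->to_frobnicate (t);
        return;
      }

  noprocess ();
}
#endif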
838
839 /* Push a new target type into the stack of the existing target accessors,
840 possibly superseding some of the existing accessors.
841
842 Result is zero if the pushed target ended up on top of the stack,
843 nonzero if at least one target is on top of it.
844
845 Rather than allow an empty stack, we always have the dummy target at
846 the bottom stratum, so we can call the function vectors without
847 checking them. */
848
849 int
850 push_target (struct target_ops *t)
851 {
852 struct target_ops **cur;
853
854 /* Check magic number. If wrong, it probably means someone changed
855 the struct definition, but not all the places that initialize one. */
856 if (t->to_magic != OPS_MAGIC)
857 {
858 fprintf_unfiltered (gdb_stderr,
859 "Magic number of %s target struct wrong\n",
860 t->to_shortname);
861 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
862 }
863
864 /* Find the proper stratum to install this target in. */
865 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
866 {
867 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
868 break;
869 }
870
871 /* If there are already targets at this stratum, remove them. */
872 /* FIXME: cagney/2003-10-15: I think this should be popping all
873 targets to CUR, and not just those at this stratum level. */
874 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
875 {
876 /* There's already something at this stratum level. Close it,
877 and un-hook it from the stack. */
878 struct target_ops *tmp = (*cur);
879 (*cur) = (*cur)->beneath;
880 tmp->beneath = NULL;
881 target_close (tmp, 0);
882 }
883
884 /* We have removed all targets in our stratum, now add the new one. */
885 t->beneath = (*cur);
886 (*cur) = t;
887
888 update_current_target ();
889
890 /* Not on top? */
891 return (t != target_stack);
892 }
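/* Illustrative sketch (not part of the original file): how a target
   backend is typically registered with add_target and later pushed by
   its open routine.  The names hypothetical_ops, hypothetical_open and
   init_hypothetical_ops are assumptions made up for this example.  */
#if 0
static struct target_ops hypothetical_ops;

static void
hypothetical_open (char *args, int from_tty)
{
  target_preopen (from_tty);
  /* ... establish the connection / set up state ... */
  push_target (&hypothetical_ops);
  /* Any other target at process_stratum has now been closed and
     replaced, per the stratum ordering described above.  */
}

static void
init_hypothetical_ops (void)
{
  hypothetical_ops.to_shortname = "hypothetical";
  hypothetical_ops.to_longname = "Hypothetical example target";
  hypothetical_ops.to_doc = "Example target used only for illustration.";
  hypothetical_ops.to_open = hypothetical_open;
  hypothetical_ops.to_stratum = process_stratum;
  hypothetical_ops.to_magic = OPS_MAGIC;
  /* add_target (above) supplies defaults for the remaining
     "must have" methods.  */
  add_target (&hypothetical_ops);
}
#endif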
893
894 /* Remove a target_ops vector from the stack, wherever it may be.
895 Return how many times it was removed (0 or 1). */
896
897 int
898 unpush_target (struct target_ops *t)
899 {
900 struct target_ops **cur;
901 struct target_ops *tmp;
902
903 if (t->to_stratum == dummy_stratum)
904 internal_error (__FILE__, __LINE__,
905 "Attempt to unpush the dummy target");
906
907 /* Look for the specified target. Note that we assume that a target
908 can only occur once in the target stack. */
909
910 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
911 {
912 if ((*cur) == t)
913 break;
914 }
915
916 if ((*cur) == NULL)
917 return 0; /* Didn't find target_ops, quit now */
918
919 /* NOTE: cagney/2003-12-06: In '94 the close call was made
920 unconditional by moving it to before the above check that the
921 target was in the target stack (something about "Change the way
922 pushing and popping of targets work to support target overlays
923 and inheritance"). This doesn't make much sense - only open
924 targets should be closed. */
925 target_close (t, 0);
926
927 /* Unchain the target */
928 tmp = (*cur);
929 (*cur) = (*cur)->beneath;
930 tmp->beneath = NULL;
931
932 update_current_target ();
933
934 return 1;
935 }
936
937 void
938 pop_target (void)
939 {
940 target_close (target_stack, 0); /* Let it clean up */
941 if (unpush_target (target_stack) == 1)
942 return;
943
944 fprintf_unfiltered (gdb_stderr,
945 "pop_target couldn't find target %s\n",
946 current_target.to_shortname);
947 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
948 }
949
950 void
951 pop_all_targets_above (enum strata above_stratum, int quitting)
952 {
953 while ((int) (current_target.to_stratum) > (int) above_stratum)
954 {
955 target_close (target_stack, quitting);
956 if (!unpush_target (target_stack))
957 {
958 fprintf_unfiltered (gdb_stderr,
959 "pop_all_targets couldn't find target %s\n",
960 target_stack->to_shortname);
961 internal_error (__FILE__, __LINE__,
962 _("failed internal consistency check"));
963 break;
964 }
965 }
966 }
967
968 void
969 pop_all_targets (int quitting)
970 {
971 pop_all_targets_above (dummy_stratum, quitting);
972 }
973
974 /* Using the objfile specified in OBJFILE, find the address for the
975 current thread's thread-local storage with offset OFFSET. */
976 CORE_ADDR
977 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
978 {
979 volatile CORE_ADDR addr = 0;
980 struct target_ops *target;
981
982 for (target = current_target.beneath;
983 target != NULL;
984 target = target->beneath)
985 {
986 if (target->to_get_thread_local_address != NULL)
987 break;
988 }
989
990 if (target != NULL
991 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
992 {
993 ptid_t ptid = inferior_ptid;
994 volatile struct gdb_exception ex;
995
996 TRY_CATCH (ex, RETURN_MASK_ALL)
997 {
998 CORE_ADDR lm_addr;
999
1000 /* Fetch the load module address for this objfile. */
1001 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1002 objfile);
1003 /* If it's 0, throw the appropriate exception. */
1004 if (lm_addr == 0)
1005 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1006 _("TLS load module not found"));
1007
1008 addr = target->to_get_thread_local_address (target, ptid, lm_addr, offset);
1009 }
1010 /* If an error occurred, print TLS related messages here. Otherwise,
1011 throw the error to some higher catcher. */
1012 if (ex.reason < 0)
1013 {
1014 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1015
1016 switch (ex.error)
1017 {
1018 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1019 error (_("Cannot find thread-local variables in this thread library."));
1020 break;
1021 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1022 if (objfile_is_library)
1023 error (_("Cannot find shared library `%s' in dynamic"
1024 " linker's load module list"), objfile->name);
1025 else
1026 error (_("Cannot find executable file `%s' in dynamic"
1027 " linker's load module list"), objfile->name);
1028 break;
1029 case TLS_NOT_ALLOCATED_YET_ERROR:
1030 if (objfile_is_library)
1031 error (_("The inferior has not yet allocated storage for"
1032 " thread-local variables in\n"
1033 "the shared library `%s'\n"
1034 "for %s"),
1035 objfile->name, target_pid_to_str (ptid));
1036 else
1037 error (_("The inferior has not yet allocated storage for"
1038 " thread-local variables in\n"
1039 "the executable `%s'\n"
1040 "for %s"),
1041 objfile->name, target_pid_to_str (ptid));
1042 break;
1043 case TLS_GENERIC_ERROR:
1044 if (objfile_is_library)
1045 error (_("Cannot find thread-local storage for %s, "
1046 "shared library %s:\n%s"),
1047 target_pid_to_str (ptid),
1048 objfile->name, ex.message);
1049 else
1050 error (_("Cannot find thread-local storage for %s, "
1051 "executable file %s:\n%s"),
1052 target_pid_to_str (ptid),
1053 objfile->name, ex.message);
1054 break;
1055 default:
1056 throw_exception (ex);
1057 break;
1058 }
1059 }
1060 }
1061 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1062 TLS is an ABI-specific thing. But we don't do that yet. */
1063 else
1064 error (_("Cannot find thread-local variables on this target"));
1065
1066 return addr;
1067 }
1068
1069 #undef MIN
1070 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1071
1072 /* target_read_string -- read a null terminated string, up to LEN bytes,
1073 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1074 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1075 is responsible for freeing it. Return the number of bytes successfully
1076 read. */
1077
1078 int
1079 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1080 {
1081 int tlen, origlen, offset, i;
1082 gdb_byte buf[4];
1083 int errcode = 0;
1084 char *buffer;
1085 int buffer_allocated;
1086 char *bufptr;
1087 unsigned int nbytes_read = 0;
1088
1089 gdb_assert (string);
1090
1091 /* Small for testing. */
1092 buffer_allocated = 4;
1093 buffer = xmalloc (buffer_allocated);
1094 bufptr = buffer;
1095
1096 origlen = len;
1097
1098 while (len > 0)
1099 {
1100 tlen = MIN (len, 4 - (memaddr & 3));
1101 offset = memaddr & 3;
1102
1103 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1104 if (errcode != 0)
1105 {
1106 /* The transfer request might have crossed the boundary to an
1107 unallocated region of memory. Retry the transfer, requesting
1108 a single byte. */
1109 tlen = 1;
1110 offset = 0;
1111 errcode = target_read_memory (memaddr, buf, 1);
1112 if (errcode != 0)
1113 goto done;
1114 }
1115
1116 if (bufptr - buffer + tlen > buffer_allocated)
1117 {
1118 unsigned int bytes;
1119 bytes = bufptr - buffer;
1120 buffer_allocated *= 2;
1121 buffer = xrealloc (buffer, buffer_allocated);
1122 bufptr = buffer + bytes;
1123 }
1124
1125 for (i = 0; i < tlen; i++)
1126 {
1127 *bufptr++ = buf[i + offset];
1128 if (buf[i + offset] == '\000')
1129 {
1130 nbytes_read += i + 1;
1131 goto done;
1132 }
1133 }
1134
1135 memaddr += tlen;
1136 len -= tlen;
1137 nbytes_read += tlen;
1138 }
1139 done:
1140 *string = buffer;
1141 if (errnop != NULL)
1142 *errnop = errcode;
1143 return nbytes_read;
1144 }
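/* Illustrative usage sketch (not part of the original file) for
   target_read_string above; MEMADDR stands for an address obtained
   elsewhere.  */
#if 0
  {
    char *str;
    int errcode, nbytes;

    nbytes = target_read_string (memaddr, &str, 200, &errcode);
    if (errcode != 0)
      printf_filtered (_("Read %d bytes before error %d.\n"),
                       nbytes, errcode);
    /* The buffer is malloc'd in all cases; the caller frees it.  */
    xfree (str);
  }
#endif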
1145
1146 struct target_section_table *
1147 target_get_section_table (struct target_ops *target)
1148 {
1149 struct target_ops *t;
1150
1151 if (targetdebug)
1152 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1153
1154 for (t = target; t != NULL; t = t->beneath)
1155 if (t->to_get_section_table != NULL)
1156 return (*t->to_get_section_table) (t);
1157
1158 return NULL;
1159 }
1160
1161 /* Find a section containing ADDR. */
1162
1163 struct target_section *
1164 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1165 {
1166 struct target_section_table *table = target_get_section_table (target);
1167 struct target_section *secp;
1168
1169 if (table == NULL)
1170 return NULL;
1171
1172 for (secp = table->sections; secp < table->sections_end; secp++)
1173 {
1174 if (addr >= secp->addr && addr < secp->endaddr)
1175 return secp;
1176 }
1177 return NULL;
1178 }
1179
1180 /* Perform a partial memory transfer. The arguments and return
1181 value are just as for target_xfer_partial. */
1182
1183 static LONGEST
1184 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1185 void *readbuf, const void *writebuf, ULONGEST memaddr,
1186 LONGEST len)
1187 {
1188 LONGEST res;
1189 int reg_len;
1190 struct mem_region *region;
1191 struct inferior *inf;
1192
1193 /* Zero length requests are ok and require no work. */
1194 if (len == 0)
1195 return 0;
1196
1197 /* For accesses to unmapped overlay sections, read directly from
1198 files. Must do this first, as MEMADDR may need adjustment. */
1199 if (readbuf != NULL && overlay_debugging)
1200 {
1201 struct obj_section *section = find_pc_overlay (memaddr);
1202 if (pc_in_unmapped_range (memaddr, section))
1203 {
1204 struct target_section_table *table
1205 = target_get_section_table (ops);
1206 const char *section_name = section->the_bfd_section->name;
1207 memaddr = overlay_mapped_address (memaddr, section);
1208 return section_table_xfer_memory_partial (readbuf, writebuf,
1209 memaddr, len,
1210 table->sections,
1211 table->sections_end,
1212 section_name);
1213 }
1214 }
1215
1216 /* Try the executable files, if "trust-readonly-sections" is set. */
1217 if (readbuf != NULL && trust_readonly)
1218 {
1219 struct target_section *secp;
1220 struct target_section_table *table;
1221
1222 secp = target_section_by_addr (ops, memaddr);
1223 if (secp != NULL
1224 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1225 & SEC_READONLY))
1226 {
1227 table = target_get_section_table (ops);
1228 return section_table_xfer_memory_partial (readbuf, writebuf,
1229 memaddr, len,
1230 table->sections,
1231 table->sections_end,
1232 NULL);
1233 }
1234 }
1235
1236 /* Try GDB's internal data cache. */
1237 region = lookup_mem_region (memaddr);
1238 /* region->hi == 0 means there's no upper bound. */
1239 if (memaddr + len < region->hi || region->hi == 0)
1240 reg_len = len;
1241 else
1242 reg_len = region->hi - memaddr;
1243
1244 switch (region->attrib.mode)
1245 {
1246 case MEM_RO:
1247 if (writebuf != NULL)
1248 return -1;
1249 break;
1250
1251 case MEM_WO:
1252 if (readbuf != NULL)
1253 return -1;
1254 break;
1255
1256 case MEM_FLASH:
1257 /* We only support writing to flash during "load" for now. */
1258 if (writebuf != NULL)
1259 error (_("Writing to flash memory forbidden in this context"));
1260 break;
1261
1262 case MEM_NONE:
1263 return -1;
1264 }
1265
1266 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1267
1268 if (inf != NULL
1269 && (region->attrib.cache
1270 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1271 {
1272 if (readbuf != NULL)
1273 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1274 reg_len, 0);
1275 else
1276 /* FIXME drow/2006-08-09: If we're going to preserve const
1277 correctness dcache_xfer_memory should take readbuf and
1278 writebuf. */
1279 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1280 (void *) writebuf,
1281 reg_len, 1);
1282 if (res <= 0)
1283 return -1;
1284 else
1285 {
1286 if (readbuf && !show_memory_breakpoints)
1287 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1288 return res;
1289 }
1290 }
1291
1292 /* If none of those methods found the memory we wanted, fall back
1293 to a target partial transfer. Normally a single call to
1294 to_xfer_partial is enough; if it doesn't recognize an object
1295 it will call the to_xfer_partial of the next target down.
1296 But for memory this won't do. Memory is the only target
1297 object which can be read from more than one valid target.
1298 A core file, for instance, could have some of memory but
1299 delegate other bits to the target below it. So, we must
1300 manually try all targets. */
1301
1302 do
1303 {
1304 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1305 readbuf, writebuf, memaddr, reg_len);
1306 if (res > 0)
1307 break;
1308
1309 /* We want to continue past core files to executables, but not
1310 past a running target's memory. */
1311 if (ops->to_has_all_memory (ops))
1312 break;
1313
1314 ops = ops->beneath;
1315 }
1316 while (ops != NULL);
1317
1318 if (readbuf && !show_memory_breakpoints)
1319 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1320
1321 /* Make sure the cache gets updated no matter what - if we are writing
1322 to the stack. Even if this write is not tagged as such, we still need
1323 to update the cache. */
1324
1325 if (res > 0
1326 && inf != NULL
1327 && writebuf != NULL
1328 && !region->attrib.cache
1329 && stack_cache_enabled_p
1330 && object != TARGET_OBJECT_STACK_MEMORY)
1331 {
1332 dcache_update (target_dcache, memaddr, (void *) writebuf, reg_len);
1333 }
1334
1335 /* If we still haven't got anything, return the last error. We
1336 give up. */
1337 return res;
1338 }
1339
1340 static void
1341 restore_show_memory_breakpoints (void *arg)
1342 {
1343 show_memory_breakpoints = (uintptr_t) arg;
1344 }
1345
1346 struct cleanup *
1347 make_show_memory_breakpoints_cleanup (int show)
1348 {
1349 int current = show_memory_breakpoints;
1350 show_memory_breakpoints = show;
1351
1352 return make_cleanup (restore_show_memory_breakpoints,
1353 (void *) (uintptr_t) current);
1354 }
1355
1356 static LONGEST
1357 target_xfer_partial (struct target_ops *ops,
1358 enum target_object object, const char *annex,
1359 void *readbuf, const void *writebuf,
1360 ULONGEST offset, LONGEST len)
1361 {
1362 LONGEST retval;
1363
1364 gdb_assert (ops->to_xfer_partial != NULL);
1365
1366 /* If this is a memory transfer, let the memory-specific code
1367 have a look at it instead. Memory transfers are more
1368 complicated. */
1369 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1370 retval = memory_xfer_partial (ops, object, readbuf,
1371 writebuf, offset, len);
1372 else
1373 {
1374 enum target_object raw_object = object;
1375
1376 /* If this is a raw memory transfer, request the normal
1377 memory object from other layers. */
1378 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1379 raw_object = TARGET_OBJECT_MEMORY;
1380
1381 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1382 writebuf, offset, len);
1383 }
1384
1385 if (targetdebug)
1386 {
1387 const unsigned char *myaddr = NULL;
1388
1389 fprintf_unfiltered (gdb_stdlog,
1390 "%s:target_xfer_partial (%d, %s, %s, %s, %s, %s) = %s",
1391 ops->to_shortname,
1392 (int) object,
1393 (annex ? annex : "(null)"),
1394 host_address_to_string (readbuf),
1395 host_address_to_string (writebuf),
1396 core_addr_to_string_nz (offset),
1397 plongest (len), plongest (retval));
1398
1399 if (readbuf)
1400 myaddr = readbuf;
1401 if (writebuf)
1402 myaddr = writebuf;
1403 if (retval > 0 && myaddr != NULL)
1404 {
1405 int i;
1406
1407 fputs_unfiltered (", bytes =", gdb_stdlog);
1408 for (i = 0; i < retval; i++)
1409 {
1410 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1411 {
1412 if (targetdebug < 2 && i > 0)
1413 {
1414 fprintf_unfiltered (gdb_stdlog, " ...");
1415 break;
1416 }
1417 fprintf_unfiltered (gdb_stdlog, "\n");
1418 }
1419
1420 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1421 }
1422 }
1423
1424 fputc_unfiltered ('\n', gdb_stdlog);
1425 }
1426 return retval;
1427 }
1428
1429 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1430 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1431 if any error occurs.
1432
1433 If an error occurs, no guarantee is made about the contents of the data at
1434 MYADDR. In particular, the caller should not depend upon partial reads
1435 filling the buffer with good data. There is no way for the caller to know
1436 how much good data might have been transferred anyway. Callers that can
1437 deal with partial reads should call target_read (which will retry until
1438 it makes no progress, and then return how much was transferred). */
1439
1440 int
1441 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1442 {
1443 /* Dispatch to the topmost target, not the flattened current_target.
1444 Memory accesses check target->to_has_(all_)memory, and the
1445 flattened target doesn't inherit those. */
1446 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1447 myaddr, memaddr, len) == len)
1448 return 0;
1449 else
1450 return EIO;
1451 }
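/* Illustrative sketch (not part of the original file) contrasting the
   all-or-nothing target_read_memory above with the partial-read
   tolerant target_read, as the comment suggests; MEMADDR is an
   assumed address.  */
#if 0
  {
    gdb_byte buf[64];
    LONGEST nread;

    /* Returns 0 on success or an errno value; BUF contents are
       unspecified on failure.  */
    if (target_read_memory (memaddr, buf, sizeof buf) != 0)
      error (_("Cannot read target memory"));

    /* Retries until no progress is made and reports how much was
       actually transferred.  */
    nread = target_read (current_target.beneath, TARGET_OBJECT_MEMORY,
                         NULL, buf, memaddr, sizeof buf);
  }
#endif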
1452
1453 /* Like target_read_memory, but specify explicitly that this is a read from
1454 the target's stack. This may trigger different cache behavior. */
1455
1456 int
1457 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1458 {
1459 /* Dispatch to the topmost target, not the flattened current_target.
1460 Memory accesses check target->to_has_(all_)memory, and the
1461 flattened target doesn't inherit those. */
1462
1463 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1464 myaddr, memaddr, len) == len)
1465 return 0;
1466 else
1467 return EIO;
1468 }
1469
1470 int
1471 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1472 {
1473 /* Dispatch to the topmost target, not the flattened current_target.
1474 Memory accesses check target->to_has_(all_)memory, and the
1475 flattened target doesn't inherit those. */
1476 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1477 myaddr, memaddr, len) == len)
1478 return 0;
1479 else
1480 return EIO;
1481 }
1482
1483 /* Fetch the target's memory map. */
1484
1485 VEC(mem_region_s) *
1486 target_memory_map (void)
1487 {
1488 VEC(mem_region_s) *result;
1489 struct mem_region *last_one, *this_one;
1490 int ix;
1491 struct target_ops *t;
1492
1493 if (targetdebug)
1494 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1495
1496 for (t = current_target.beneath; t != NULL; t = t->beneath)
1497 if (t->to_memory_map != NULL)
1498 break;
1499
1500 if (t == NULL)
1501 return NULL;
1502
1503 result = t->to_memory_map (t);
1504 if (result == NULL)
1505 return NULL;
1506
1507 qsort (VEC_address (mem_region_s, result),
1508 VEC_length (mem_region_s, result),
1509 sizeof (struct mem_region), mem_region_cmp);
1510
1511 /* Check that regions do not overlap. Simultaneously assign
1512 a numbering for the "mem" commands to use to refer to
1513 each region. */
1514 last_one = NULL;
1515 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1516 {
1517 this_one->number = ix;
1518
1519 if (last_one && last_one->hi > this_one->lo)
1520 {
1521 warning (_("Overlapping regions in memory map: ignoring"));
1522 VEC_free (mem_region_s, result);
1523 return NULL;
1524 }
1525 last_one = this_one;
1526 }
1527
1528 return result;
1529 }
1530
1531 void
1532 target_flash_erase (ULONGEST address, LONGEST length)
1533 {
1534 struct target_ops *t;
1535
1536 for (t = current_target.beneath; t != NULL; t = t->beneath)
1537 if (t->to_flash_erase != NULL)
1538 {
1539 if (targetdebug)
1540 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1541 hex_string (address), phex (length, 0));
1542 t->to_flash_erase (t, address, length);
1543 return;
1544 }
1545
1546 tcomplain ();
1547 }
1548
1549 void
1550 target_flash_done (void)
1551 {
1552 struct target_ops *t;
1553
1554 for (t = current_target.beneath; t != NULL; t = t->beneath)
1555 if (t->to_flash_done != NULL)
1556 {
1557 if (targetdebug)
1558 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1559 t->to_flash_done (t);
1560 return;
1561 }
1562
1563 tcomplain ();
1564 }
1565
1566 static void
1567 show_trust_readonly (struct ui_file *file, int from_tty,
1568 struct cmd_list_element *c, const char *value)
1569 {
1570 fprintf_filtered (file, _("\
1571 Mode for reading from readonly sections is %s.\n"),
1572 value);
1573 }
1574
1575 /* More generic transfers. */
1576
1577 static LONGEST
1578 default_xfer_partial (struct target_ops *ops, enum target_object object,
1579 const char *annex, gdb_byte *readbuf,
1580 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1581 {
1582 if (object == TARGET_OBJECT_MEMORY
1583 && ops->deprecated_xfer_memory != NULL)
1584 /* If available, fall back to the target's
1585 "deprecated_xfer_memory" method. */
1586 {
1587 int xfered = -1;
1588 errno = 0;
1589 if (writebuf != NULL)
1590 {
1591 void *buffer = xmalloc (len);
1592 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1593 memcpy (buffer, writebuf, len);
1594 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1595 1/*write*/, NULL, ops);
1596 do_cleanups (cleanup);
1597 }
1598 if (readbuf != NULL)
1599 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1600 0/*read*/, NULL, ops);
1601 if (xfered > 0)
1602 return xfered;
1603 else if (xfered == 0 && errno == 0)
1604 /* "deprecated_xfer_memory" uses 0, cross checked against
1605 ERRNO as one indication of an error. */
1606 return 0;
1607 else
1608 return -1;
1609 }
1610 else if (ops->beneath != NULL)
1611 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1612 readbuf, writebuf, offset, len);
1613 else
1614 return -1;
1615 }
1616
1617 /* The xfer_partial handler for the topmost target. Unlike the default,
1618 it does not need to handle memory specially; it just passes all
1619 requests down the stack. */
1620
1621 static LONGEST
1622 current_xfer_partial (struct target_ops *ops, enum target_object object,
1623 const char *annex, gdb_byte *readbuf,
1624 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1625 {
1626 if (ops->beneath != NULL)
1627 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1628 readbuf, writebuf, offset, len);
1629 else
1630 return -1;
1631 }
1632
1633 /* Target vector read/write partial wrapper functions.
1634
1635 NOTE: cagney/2003-10-21: I wonder if having "to_xfer_partial
1636 (inbuf, outbuf)", instead of separate read/write methods, makes life
1637 easier. */
1638
1639 static LONGEST
1640 target_read_partial (struct target_ops *ops,
1641 enum target_object object,
1642 const char *annex, gdb_byte *buf,
1643 ULONGEST offset, LONGEST len)
1644 {
1645 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1646 }
1647
1648 static LONGEST
1649 target_write_partial (struct target_ops *ops,
1650 enum target_object object,
1651 const char *annex, const gdb_byte *buf,
1652 ULONGEST offset, LONGEST len)
1653 {
1654 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1655 }
1656
1657 /* Wrappers to perform the full transfer. */
1658 LONGEST
1659 target_read (struct target_ops *ops,
1660 enum target_object object,
1661 const char *annex, gdb_byte *buf,
1662 ULONGEST offset, LONGEST len)
1663 {
1664 LONGEST xfered = 0;
1665 while (xfered < len)
1666 {
1667 LONGEST xfer = target_read_partial (ops, object, annex,
1668 (gdb_byte *) buf + xfered,
1669 offset + xfered, len - xfered);
1670 /* Call an observer, notifying them of the xfer progress? */
1671 if (xfer == 0)
1672 return xfered;
1673 if (xfer < 0)
1674 return -1;
1675 xfered += xfer;
1676 QUIT;
1677 }
1678 return len;
1679 }
1680
1681 LONGEST
1682 target_read_until_error (struct target_ops *ops,
1683 enum target_object object,
1684 const char *annex, gdb_byte *buf,
1685 ULONGEST offset, LONGEST len)
1686 {
1687 LONGEST xfered = 0;
1688 while (xfered < len)
1689 {
1690 LONGEST xfer = target_read_partial (ops, object, annex,
1691 (gdb_byte *) buf + xfered,
1692 offset + xfered, len - xfered);
1693 /* Call an observer, notifying them of the xfer progress? */
1694 if (xfer == 0)
1695 return xfered;
1696 if (xfer < 0)
1697 {
1698 /* We've got an error. Try to read in smaller blocks. */
1699 ULONGEST start = offset + xfered;
1700 ULONGEST remaining = len - xfered;
1701 ULONGEST half;
1702
1703 /* If an attempt was made to read a random memory address,
1704 it's likely that the very first byte is not accessible.
1705 Try reading the first byte, to avoid doing log N tries
1706 below. */
1707 xfer = target_read_partial (ops, object, annex,
1708 (gdb_byte *) buf + xfered, start, 1);
1709 if (xfer <= 0)
1710 return xfered;
1711 start += 1;
1712 remaining -= 1;
1713 half = remaining/2;
1714
1715 while (half > 0)
1716 {
1717 xfer = target_read_partial (ops, object, annex,
1718 (gdb_byte *) buf + xfered,
1719 start, half);
1720 if (xfer == 0)
1721 return xfered;
1722 if (xfer < 0)
1723 {
1724 remaining = half;
1725 }
1726 else
1727 {
1728 /* We have successfully read the first half. So, the
1729 error must be in the second half. Adjust start and
1730 remaining to point at the second half. */
1731 xfered += xfer;
1732 start += xfer;
1733 remaining -= xfer;
1734 }
1735 half = remaining/2;
1736 }
1737
1738 return xfered;
1739 }
1740 xfered += xfer;
1741 QUIT;
1742 }
1743 return len;
1744 }
1745
1746
1747 /* An alternative to target_write with progress callbacks. */
1748
1749 LONGEST
1750 target_write_with_progress (struct target_ops *ops,
1751 enum target_object object,
1752 const char *annex, const gdb_byte *buf,
1753 ULONGEST offset, LONGEST len,
1754 void (*progress) (ULONGEST, void *), void *baton)
1755 {
1756 LONGEST xfered = 0;
1757
1758 /* Give the progress callback a chance to set up. */
1759 if (progress)
1760 (*progress) (0, baton);
1761
1762 while (xfered < len)
1763 {
1764 LONGEST xfer = target_write_partial (ops, object, annex,
1765 (gdb_byte *) buf + xfered,
1766 offset + xfered, len - xfered);
1767
1768 if (xfer == 0)
1769 return xfered;
1770 if (xfer < 0)
1771 return -1;
1772
1773 if (progress)
1774 (*progress) (xfer, baton);
1775
1776 xfered += xfer;
1777 QUIT;
1778 }
1779 return len;
1780 }
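/* Illustrative sketch (not part of the original file) of the progress
   callback expected by target_write_with_progress above; the callback
   name, its behavior, and the write parameters are assumptions.  */
#if 0
static void
example_progress (ULONGEST bytes_written, void *baton)
{
  /* E.g. advance a progress meter by BYTES_WRITTEN.  */
}

  /* ... */
  target_write_with_progress (current_target.beneath,
                              TARGET_OBJECT_MEMORY, NULL, buf,
                              memaddr, len, example_progress, NULL);
#endif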
1781
1782 LONGEST
1783 target_write (struct target_ops *ops,
1784 enum target_object object,
1785 const char *annex, const gdb_byte *buf,
1786 ULONGEST offset, LONGEST len)
1787 {
1788 return target_write_with_progress (ops, object, annex, buf, offset, len,
1789 NULL, NULL);
1790 }
1791
1792 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1793 the size of the transferred data. PADDING additional bytes are
1794 available in *BUF_P. This is a helper function for
1795 target_read_alloc; see the declaration of that function for more
1796 information. */
1797
1798 static LONGEST
1799 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1800 const char *annex, gdb_byte **buf_p, int padding)
1801 {
1802 size_t buf_alloc, buf_pos;
1803 gdb_byte *buf;
1804 LONGEST n;
1805
1806 /* This function does not have a length parameter; it reads the
1807 entire OBJECT. Also, it doesn't support objects fetched partly
1808 from one target and partly from another (in a different stratum,
1809 e.g. a core file and an executable). Both reasons make it
1810 unsuitable for reading memory. */
1811 gdb_assert (object != TARGET_OBJECT_MEMORY);
1812
1813 /* Start by reading up to 4K at a time. The target will throttle
1814 this number down if necessary. */
1815 buf_alloc = 4096;
1816 buf = xmalloc (buf_alloc);
1817 buf_pos = 0;
1818 while (1)
1819 {
1820 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1821 buf_pos, buf_alloc - buf_pos - padding);
1822 if (n < 0)
1823 {
1824 /* An error occurred. */
1825 xfree (buf);
1826 return -1;
1827 }
1828 else if (n == 0)
1829 {
1830 /* Read all there was. */
1831 if (buf_pos == 0)
1832 xfree (buf);
1833 else
1834 *buf_p = buf;
1835 return buf_pos;
1836 }
1837
1838 buf_pos += n;
1839
1840 /* If the buffer is filling up, expand it. */
1841 if (buf_alloc < buf_pos * 2)
1842 {
1843 buf_alloc *= 2;
1844 buf = xrealloc (buf, buf_alloc);
1845 }
1846
1847 QUIT;
1848 }
1849 }
1850
1851 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1852 the size of the transferred data. See the declaration in "target.h"
1853 for more information about the return value. */
1854
1855 LONGEST
1856 target_read_alloc (struct target_ops *ops, enum target_object object,
1857 const char *annex, gdb_byte **buf_p)
1858 {
1859 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1860 }
1861
1862 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1863 returned as a string, allocated using xmalloc. If an error occurs
1864 or the transfer is unsupported, NULL is returned. Empty objects
1865 are returned as allocated but empty strings. A warning is issued
1866 if the result contains any embedded NUL bytes. */
1867
1868 char *
1869 target_read_stralloc (struct target_ops *ops, enum target_object object,
1870 const char *annex)
1871 {
1872 gdb_byte *buffer;
1873 LONGEST transferred;
1874
1875 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1876
1877 if (transferred < 0)
1878 return NULL;
1879
1880 if (transferred == 0)
1881 return xstrdup ("");
1882
1883 buffer[transferred] = 0;
1884 if (strlen (buffer) < transferred)
1885 warning (_("target object %d, annex %s, "
1886 "contained unexpected null characters"),
1887 (int) object, annex ? annex : "(none)");
1888
1889 return (char *) buffer;
1890 }
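/* Illustrative usage sketch (not part of the original file) for
   target_read_stralloc above.  The object/annex pair shown
   (TARGET_OBJECT_LIBRARIES with no annex) is only an assumption of
   something the current target might provide.  */
#if 0
  {
    char *document
      = target_read_stralloc (&current_target, TARGET_OBJECT_LIBRARIES,
                              NULL);

    if (document != NULL)
      {
        struct cleanup *back_to = make_cleanup (xfree, document);

        /* ... parse DOCUMENT ... */
        do_cleanups (back_to);
      }
  }
#endif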
1891
1892 /* Memory transfer methods. */
1893
1894 void
1895 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1896 LONGEST len)
1897 {
1898 /* This method is used to read from an alternate, non-current
1899 target. This read must bypass the overlay support (as symbols
1900 don't match this target), and GDB's internal cache (wrong cache
1901 for this target). */
1902 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1903 != len)
1904 memory_error (EIO, addr);
1905 }
1906
1907 ULONGEST
1908 get_target_memory_unsigned (struct target_ops *ops,
1909 CORE_ADDR addr, int len, enum bfd_endian byte_order)
1910 {
1911 gdb_byte buf[sizeof (ULONGEST)];
1912
1913 gdb_assert (len <= sizeof (buf));
1914 get_target_memory (ops, addr, buf, len);
1915 return extract_unsigned_integer (buf, len, byte_order);
1916 }
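/* Illustrative sketch (not part of the original file): reading a
   32-bit value through an alternate target vector OPS, bypassing
   overlay translation and the dcache as described in get_target_memory
   above; ADDR is an assumed address.  */
#if 0
  {
    ULONGEST value
      = get_target_memory_unsigned (ops, addr, 4,
                                    gdbarch_byte_order (target_gdbarch));
  }
#endif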
1917
1918 static void
1919 target_info (char *args, int from_tty)
1920 {
1921 struct target_ops *t;
1922 int has_all_mem = 0;
1923
1924 if (symfile_objfile != NULL)
1925 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
1926
1927 for (t = target_stack; t != NULL; t = t->beneath)
1928 {
1929 if (!(*t->to_has_memory) (t))
1930 continue;
1931
1932 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1933 continue;
1934 if (has_all_mem)
1935 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
1936 printf_unfiltered ("%s:\n", t->to_longname);
1937 (t->to_files_info) (t);
1938 has_all_mem = (*t->to_has_all_memory) (t);
1939 }
1940 }
1941
1942 /* This function is called before any new inferior is created, e.g.
1943 by running a program, attaching, or connecting to a target.
1944 It cleans up any state from previous invocations which might
1945 change between runs. This is a subset of what target_preopen
1946 resets (things which might change between targets). */
1947
1948 void
1949 target_pre_inferior (int from_tty)
1950 {
1951 /* Clear out solib state. Otherwise the solib state of the previous
1952 inferior might have survived and is entirely wrong for the new
1953 target. This has been observed on GNU/Linux using glibc 2.3. How
1954 to reproduce:
1955
1956 bash$ ./foo&
1957 [1] 4711
1958 bash$ ./foo&
1959 [2] 4712
1960 bash$ gdb ./foo
1961 [...]
1962 (gdb) attach 4711
1963 (gdb) detach
1964 (gdb) attach 4712
1965 Cannot access memory at address 0xdeadbeef
1966 */
1967
1968 /* In some OSs, the shared library list is the same/global/shared
1969 across inferiors. If code is shared between processes, so are
1970 memory regions and features. */
1971 if (!gdbarch_has_global_solist (target_gdbarch))
1972 {
1973 no_shared_libraries (NULL, from_tty);
1974
1975 invalidate_target_mem_regions ();
1976
1977 target_clear_description ();
1978 }
1979 }
1980
1981 /* Callback for iterate_over_inferiors. Gets rid of the given
1982 inferior. */
1983
1984 static int
1985 dispose_inferior (struct inferior *inf, void *args)
1986 {
1987 struct thread_info *thread;
1988
1989 thread = any_thread_of_process (inf->pid);
1990 if (thread)
1991 {
1992 switch_to_thread (thread->ptid);
1993
1994 /* Core inferiors actually should be detached, not killed. */
1995 if (target_has_execution)
1996 target_kill ();
1997 else
1998 target_detach (NULL, 0);
1999 }
2000
2001 return 0;
2002 }
2003
2004 /* This is to be called by the open routine before it does
2005 anything. */
2006
2007 void
2008 target_preopen (int from_tty)
2009 {
2010 dont_repeat ();
2011
2012 if (have_inferiors ())
2013 {
2014 if (!from_tty
2015 || !have_live_inferiors ()
2016 || query (_("A program is being debugged already. Kill it? ")))
2017 iterate_over_inferiors (dispose_inferior, NULL);
2018 else
2019 error (_("Program not killed."));
2020 }
2021
2022 /* Calling target_kill may remove the target from the stack. But if
2023 it doesn't (which seems like a win for UDI), remove it now. */
2024 /* Leave the exec target, though. The user may be switching from a
2025 live process to a core of the same program. */
2026 pop_all_targets_above (file_stratum, 0);
2027
2028 target_pre_inferior (from_tty);
2029 }
2030
2031 /* Detach a target after doing deferred register stores. */
2032
2033 void
2034 target_detach (char *args, int from_tty)
2035 {
2036 struct target_ops *t;
2037
2038 if (gdbarch_has_global_breakpoints (target_gdbarch))
2039 /* Don't remove global breakpoints here. They're removed on
2040 disconnection from the target. */
2041 ;
2042 else
2043 /* If we're in breakpoints-always-inserted mode, we have to remove
2044 them before detaching. */
2045 remove_breakpoints ();
2046
2047 for (t = current_target.beneath; t != NULL; t = t->beneath)
2048 {
2049 if (t->to_detach != NULL)
2050 {
2051 t->to_detach (t, args, from_tty);
2052 if (targetdebug)
2053 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2054 args, from_tty);
2055 return;
2056 }
2057 }
2058
2059 internal_error (__FILE__, __LINE__, "could not find a target to detach");
2060 }
2061
2062 void
2063 target_disconnect (char *args, int from_tty)
2064 {
2065 struct target_ops *t;
2066
2067 /* If we're in breakpoints-always-inserted mode or if breakpoints
2068 are global across processes, we have to remove them before
2069 disconnecting. */
2070 remove_breakpoints ();
2071
2072 for (t = current_target.beneath; t != NULL; t = t->beneath)
2073 if (t->to_disconnect != NULL)
2074 {
2075 if (targetdebug)
2076 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2077 args, from_tty);
2078 t->to_disconnect (t, args, from_tty);
2079 return;
2080 }
2081
2082 tcomplain ();
2083 }
2084
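/* Wait for an event from the inferior. The request is passed to the
   first target on the stack that implements to_wait; the result is
   logged when "set debug target" is non-zero. */
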
2085 ptid_t
2086 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2087 {
2088 struct target_ops *t;
2089
2090 for (t = current_target.beneath; t != NULL; t = t->beneath)
2091 {
2092 if (t->to_wait != NULL)
2093 {
2094 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2095
2096 if (targetdebug)
2097 {
2098 char *status_string;
2099
2100 status_string = target_waitstatus_to_string (status);
2101 fprintf_unfiltered (gdb_stdlog,
2102 "target_wait (%d, status) = %d, %s\n",
2103 PIDGET (ptid), PIDGET (retval),
2104 status_string);
2105 xfree (status_string);
2106 }
2107
2108 return retval;
2109 }
2110 }
2111
2112 noprocess ();
2113 }
2114
2115 char *
2116 target_pid_to_str (ptid_t ptid)
2117 {
2118 struct target_ops *t;
2119
2120 for (t = current_target.beneath; t != NULL; t = t->beneath)
2121 {
2122 if (t->to_pid_to_str != NULL)
2123 return (*t->to_pid_to_str) (t, ptid);
2124 }
2125
2126 return normal_pid_to_str (ptid);
2127 }
2128
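/* Resume the inferior (the threads selected by PTID), single-stepping
   if STEP is nonzero and delivering SIGNAL. The request goes to the
   first target on the stack that implements to_resume; the threads'
   executing/running state is then updated accordingly. */
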
2129 void
2130 target_resume (ptid_t ptid, int step, enum target_signal signal)
2131 {
2132 struct target_ops *t;
2133
2134 target_dcache_invalidate ();
2135
2136 for (t = current_target.beneath; t != NULL; t = t->beneath)
2137 {
2138 if (t->to_resume != NULL)
2139 {
2140 t->to_resume (t, ptid, step, signal);
2141 if (targetdebug)
2142 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2143 PIDGET (ptid),
2144 step ? "step" : "continue",
2145 target_signal_to_name (signal));
2146
2147 set_executing (ptid, 1);
2148 set_running (ptid, 1);
2149 clear_inline_frame_state (ptid);
2150 return;
2151 }
2152 }
2153
2154 noprocess ();
2155 }
2156 /* Look through the list of possible targets for a target that can
2157 follow forks. */
2158
2159 int
2160 target_follow_fork (int follow_child)
2161 {
2162 struct target_ops *t;
2163
2164 for (t = current_target.beneath; t != NULL; t = t->beneath)
2165 {
2166 if (t->to_follow_fork != NULL)
2167 {
2168 int retval = t->to_follow_fork (t, follow_child);
2169 if (targetdebug)
2170 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2171 follow_child, retval);
2172 return retval;
2173 }
2174 }
2175
2176 /* Some target returned a fork event, but did not know how to follow it. */
2177 internal_error (__FILE__, __LINE__,
2178 "could not find a target to follow fork");
2179 }
2180
2181 void
2182 target_mourn_inferior (void)
2183 {
2184 struct target_ops *t;
2185 for (t = current_target.beneath; t != NULL; t = t->beneath)
2186 {
2187 if (t->to_mourn_inferior != NULL)
2188 {
2189 t->to_mourn_inferior (t);
2190 if (targetdebug)
2191 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2192
2193 /* We no longer need to keep handles on any of the object files.
2194 Make sure to release them to avoid unnecessarily locking any
2195 of them while we're not actually debugging. */
2196 bfd_cache_close_all ();
2197
2198 return;
2199 }
2200 }
2201
2202 internal_error (__FILE__, __LINE__,
2203 "could not find a target to follow mourn inferiour");
2204 }
2205
2206 /* Look for a target which can describe architectural features, starting
2207 from TARGET. If we find one, return its description. */
2208
2209 const struct target_desc *
2210 target_read_description (struct target_ops *target)
2211 {
2212 struct target_ops *t;
2213
2214 for (t = target; t != NULL; t = t->beneath)
2215 if (t->to_read_description != NULL)
2216 {
2217 const struct target_desc *tdesc;
2218
2219 tdesc = t->to_read_description (t);
2220 if (tdesc)
2221 return tdesc;
2222 }
2223
2224 return NULL;
2225 }
2226
2227 /* The default implementation of to_search_memory.
2228 This implements a basic search of memory, reading target memory and
2229 performing the search here (as opposed to performing the search on the
2230 target side with, for example, gdbserver). */
2231
2232 int
2233 simple_search_memory (struct target_ops *ops,
2234 CORE_ADDR start_addr, ULONGEST search_space_len,
2235 const gdb_byte *pattern, ULONGEST pattern_len,
2236 CORE_ADDR *found_addrp)
2237 {
2238 /* NOTE: also defined in find.c testcase. */
2239 #define SEARCH_CHUNK_SIZE 16000
2240 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2241 /* Buffer to hold memory contents for searching. */
2242 gdb_byte *search_buf;
2243 unsigned search_buf_size;
2244 struct cleanup *old_cleanups;
2245
2246 search_buf_size = chunk_size + pattern_len - 1;
2247
2248 /* No point in trying to allocate a buffer larger than the search space. */
2249 if (search_space_len < search_buf_size)
2250 search_buf_size = search_space_len;
2251
2252 search_buf = malloc (search_buf_size);
2253 if (search_buf == NULL)
2254 error (_("Unable to allocate memory to perform the search."));
2255 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2256
2257 /* Prime the search buffer. */
2258
2259 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2260 search_buf, start_addr, search_buf_size) != search_buf_size)
2261 {
2262 warning (_("Unable to access target memory at %s, halting search."),
2263 hex_string (start_addr));
2264 do_cleanups (old_cleanups);
2265 return -1;
2266 }
2267
2268 /* Perform the search.
2269
2270 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2271 When we've scanned N bytes we copy the trailing bytes to the start and
2272 read in another N bytes. */
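/* Concretely: with the default 16000-byte chunk and a 4-byte pattern,
   search_buf holds 16003 bytes, and the trailing 3 bytes of each chunk
   are copied to the front before the next read, so matches that
   straddle a chunk boundary are still found. */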
2273
2274 while (search_space_len >= pattern_len)
2275 {
2276 gdb_byte *found_ptr;
2277 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2278
2279 found_ptr = memmem (search_buf, nr_search_bytes,
2280 pattern, pattern_len);
2281
2282 if (found_ptr != NULL)
2283 {
2284 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2285 *found_addrp = found_addr;
2286 do_cleanups (old_cleanups);
2287 return 1;
2288 }
2289
2290 /* Not found in this chunk, skip to next chunk. */
2291
2292 /* Don't let search_space_len wrap here, it's unsigned. */
2293 if (search_space_len >= chunk_size)
2294 search_space_len -= chunk_size;
2295 else
2296 search_space_len = 0;
2297
2298 if (search_space_len >= pattern_len)
2299 {
2300 unsigned keep_len = search_buf_size - chunk_size;
2301 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2302 int nr_to_read;
2303
2304 /* Copy the trailing part of the previous iteration to the front
2305 of the buffer for the next iteration. */
2306 gdb_assert (keep_len == pattern_len - 1);
2307 memcpy (search_buf, search_buf + chunk_size, keep_len);
2308
2309 nr_to_read = min (search_space_len - keep_len, chunk_size);
2310
2311 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2312 search_buf + keep_len, read_addr,
2313 nr_to_read) != nr_to_read)
2314 {
2315 warning (_("Unable to access target memory at %s, halting search."),
2316 hex_string (read_addr));
2317 do_cleanups (old_cleanups);
2318 return -1;
2319 }
2320
2321 start_addr += chunk_size;
2322 }
2323 }
2324
2325 /* Not found. */
2326
2327 do_cleanups (old_cleanups);
2328 return 0;
2329 }
2330
2331 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2332 sequence of bytes in PATTERN with length PATTERN_LEN.
2333
2334 The result is 1 if found, 0 if not found, and -1 if there was an error
2335 requiring halting of the search (e.g. memory read error).
2336 If the pattern is found the address is recorded in FOUND_ADDRP. */
2337
2338 int
2339 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2340 const gdb_byte *pattern, ULONGEST pattern_len,
2341 CORE_ADDR *found_addrp)
2342 {
2343 struct target_ops *t;
2344 int found;
2345
2346 /* We don't use INHERIT to set current_target.to_search_memory,
2347 so we have to scan the target stack and handle targetdebug
2348 ourselves. */
2349
2350 if (targetdebug)
2351 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2352 hex_string (start_addr));
2353
2354 for (t = current_target.beneath; t != NULL; t = t->beneath)
2355 if (t->to_search_memory != NULL)
2356 break;
2357
2358 if (t != NULL)
2359 {
2360 found = t->to_search_memory (t, start_addr, search_space_len,
2361 pattern, pattern_len, found_addrp);
2362 }
2363 else
2364 {
2365 /* If a special version of to_search_memory isn't available, use the
2366 simple version. */
2367 found = simple_search_memory (current_target.beneath,
2368 start_addr, search_space_len,
2369 pattern, pattern_len, found_addrp);
2370 }
2371
2372 if (targetdebug)
2373 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2374
2375 return found;
2376 }
2377
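/* Usage sketch (illustrative; this mirrors how the "find" command
   drives the search):

     CORE_ADDR found;
     static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };

     if (target_search_memory (start, len, pattern, sizeof (pattern),
                               &found) == 1)
       printf_filtered ("pattern found at %s\n", hex_string (found));
 */
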
2378 /* Look through the currently pushed targets. If none of them will
2379 be able to restart the currently running process, issue an error
2380 message. */
2381
2382 void
2383 target_require_runnable (void)
2384 {
2385 struct target_ops *t;
2386
2387 for (t = target_stack; t != NULL; t = t->beneath)
2388 {
2389 /* If this target knows how to create a new program, then
2390 assume we will still be able to after killing the current
2391 one. Either killing and mourning will not pop T, or else
2392 find_default_run_target will find it again. */
2393 if (t->to_create_inferior != NULL)
2394 return;
2395
2396 /* Do not worry about thread_stratum targets that cannot
2397 create inferiors. Assume they will be pushed again if
2398 necessary, and continue to the process_stratum. */
2399 if (t->to_stratum == thread_stratum
2400 || t->to_stratum == arch_stratum)
2401 continue;
2402
2403 error (_("\
2404 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2405 t->to_shortname);
2406 }
2407
2408 /* This function is only called if the target is running. In that
2409 case there should have been a process_stratum target and it
2410 should either know how to create inferiors, or not... */
2411 internal_error (__FILE__, __LINE__, "No targets found");
2412 }
2413
2414 /* Look through the list of possible targets for a target that can
2415 execute a run or attach command without any other data. This is
2416 used to locate the default process stratum.
2417
2418 If DO_MESG is not NULL, the result is always valid (error() is
2419 called for errors); else, return NULL on error. */
2420
2421 static struct target_ops *
2422 find_default_run_target (char *do_mesg)
2423 {
2424 struct target_ops **t;
2425 struct target_ops *runable = NULL;
2426 int count;
2427
2428 count = 0;
2429
2430 for (t = target_structs; t < target_structs + target_struct_size;
2431 ++t)
2432 {
2433 if ((*t)->to_can_run && target_can_run (*t))
2434 {
2435 runable = *t;
2436 ++count;
2437 }
2438 }
2439
2440 if (count != 1)
2441 {
2442 if (do_mesg)
2443 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2444 else
2445 return NULL;
2446 }
2447
2448 return runable;
2449 }
2450
2451 void
2452 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2453 {
2454 struct target_ops *t;
2455
2456 t = find_default_run_target ("attach");
2457 (t->to_attach) (t, args, from_tty);
2458 return;
2459 }
2460
2461 void
2462 find_default_create_inferior (struct target_ops *ops,
2463 char *exec_file, char *allargs, char **env,
2464 int from_tty)
2465 {
2466 struct target_ops *t;
2467
2468 t = find_default_run_target ("run");
2469 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2470 return;
2471 }
2472
2473 static int
2474 find_default_can_async_p (void)
2475 {
2476 struct target_ops *t;
2477
2478 /* This may be called before the target is pushed on the stack;
2479 look for the default process stratum. If there's none, gdb isn't
2480 configured with a native debugger, and target remote isn't
2481 connected yet. */
2482 t = find_default_run_target (NULL);
2483 if (t && t->to_can_async_p)
2484 return (t->to_can_async_p) ();
2485 return 0;
2486 }
2487
2488 static int
2489 find_default_is_async_p (void)
2490 {
2491 struct target_ops *t;
2492
2493 /* This may be called before the target is pushed on the stack;
2494 look for the default process stratum. If there's none, gdb isn't
2495 configured with a native debugger, and target remote isn't
2496 connected yet. */
2497 t = find_default_run_target (NULL);
2498 if (t && t->to_is_async_p)
2499 return (t->to_is_async_p) ();
2500 return 0;
2501 }
2502
2503 static int
2504 find_default_supports_non_stop (void)
2505 {
2506 struct target_ops *t;
2507
2508 t = find_default_run_target (NULL);
2509 if (t && t->to_supports_non_stop)
2510 return (t->to_supports_non_stop) ();
2511 return 0;
2512 }
2513
2514 int
2515 target_supports_non_stop (void)
2516 {
2517 struct target_ops *t;
2518 for (t = &current_target; t != NULL; t = t->beneath)
2519 if (t->to_supports_non_stop)
2520 return t->to_supports_non_stop ();
2521
2522 return 0;
2523 }
2524
2525
2526 char *
2527 target_get_osdata (const char *type)
2528 {
2529 char *document;
2530 struct target_ops *t;
2531
2532 /* If we're already connected to something that can get us OS
2533 related data, use it. Otherwise, try using the native
2534 target. */
2535 if (current_target.to_stratum >= process_stratum)
2536 t = current_target.beneath;
2537 else
2538 t = find_default_run_target ("get OS data");
2539
2540 if (!t)
2541 return NULL;
2542
2543 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2544 }
2545
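/* For example, a "processes" table can be requested with
   (illustrative; the caller owns the result and must xfree it):

     char *data = target_get_osdata ("processes");
 */
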
2546 static int
2547 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2548 {
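/* Assume a region can be watched if it is no wider than a pointer,
   e.g. up to 8 bytes when gdbarch_ptr_bit is 64. */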
2549 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2550 }
2551
2552 static int
2553 default_watchpoint_addr_within_range (struct target_ops *target,
2554 CORE_ADDR addr,
2555 CORE_ADDR start, int length)
2556 {
2557 return addr >= start && addr < start + length;
2558 }
2559
2560 static struct gdbarch *
2561 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2562 {
2563 return target_gdbarch;
2564 }
2565
2566 static int
2567 return_zero (void)
2568 {
2569 return 0;
2570 }
2571
2572 static int
2573 return_one (void)
2574 {
2575 return 1;
2576 }
2577
2578 static int
2579 return_minus_one (void)
2580 {
2581 return -1;
2582 }
2583
2584 /* Find a single runnable target in the list of registered targets and
2585 return it. If for some reason there is more than one, return NULL. */
2586
2587 struct target_ops *
2588 find_run_target (void)
2589 {
2590 struct target_ops **t;
2591 struct target_ops *runable = NULL;
2592 int count;
2593
2594 count = 0;
2595
2596 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2597 {
2598 if ((*t)->to_can_run && target_can_run (*t))
2599 {
2600 runable = *t;
2601 ++count;
2602 }
2603 }
2604
2605 return (count == 1 ? runable : NULL);
2606 }
2607
2608 /* Find a single core_stratum target in the list of targets and return it.
2609 If for some reason there is more than one, return NULL. */
2610
2611 struct target_ops *
2612 find_core_target (void)
2613 {
2614 struct target_ops **t;
2615 struct target_ops *runable = NULL;
2616 int count;
2617
2618 count = 0;
2619
2620 for (t = target_structs; t < target_structs + target_struct_size;
2621 ++t)
2622 {
2623 if ((*t)->to_stratum == core_stratum)
2624 {
2625 runable = *t;
2626 ++count;
2627 }
2628 }
2629
2630 return (count == 1 ? runable : NULL);
2631 }
2632
2633 /* Find the next target down the stack from the specified target. */
2636
2637 struct target_ops *
2638 find_target_beneath (struct target_ops *t)
2639 {
2640 return t->beneath;
2641 }
2642
2643 \f
2644 /* The inferior process has died. Long live the inferior! */
2645
2646 void
2647 generic_mourn_inferior (void)
2648 {
2649 ptid_t ptid;
2650
2651 ptid = inferior_ptid;
2652 inferior_ptid = null_ptid;
2653
2654 if (!ptid_equal (ptid, null_ptid))
2655 {
2656 int pid = ptid_get_pid (ptid);
2657 delete_inferior (pid);
2658 }
2659
2660 breakpoint_init_inferior (inf_exited);
2661 registers_changed ();
2662
2663 reopen_exec_file ();
2664 reinit_frame_cache ();
2665
2666 if (deprecated_detach_hook)
2667 deprecated_detach_hook ();
2668 }
2669 \f
2670 /* Helper function for child_wait and the derivatives of child_wait.
2671 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2672 translation of that in OURSTATUS. */
2673 void
2674 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2675 {
2676 if (WIFEXITED (hoststatus))
2677 {
2678 ourstatus->kind = TARGET_WAITKIND_EXITED;
2679 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2680 }
2681 else if (!WIFSTOPPED (hoststatus))
2682 {
2683 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2684 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2685 }
2686 else
2687 {
2688 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2689 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2690 }
2691 }
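
/* For instance, a native target's to_wait method might use it like
   this (illustrative; error handling elided):

     int status;

     waitpid (PIDGET (inferior_ptid), &status, 0);
     store_waitstatus (ourstatus, status);
 */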
2692 \f
2693 /* Convert a normal process ID to a string. Returns the string in a
2694 static buffer. */
2695
2696 char *
2697 normal_pid_to_str (ptid_t ptid)
2698 {
2699 static char buf[32];
2700
2701 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2702 return buf;
2703 }
2704
2705 static char *
2706 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
2707 {
2708 return normal_pid_to_str (ptid);
2709 }
2710
2711 /* Error-catcher for target_find_memory_regions */
2712 static int dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2713 {
2714 error (_("No target."));
2715 return 0;
2716 }
2717
2718 /* Error-catcher for target_make_corefile_notes */
2719 static char * dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2720 {
2721 error (_("No target."));
2722 return NULL;
2723 }
2724
2725 /* Set up the handful of non-empty slots needed by the dummy target
2726 vector. */
2727
2728 static void
2729 init_dummy_target (void)
2730 {
2731 dummy_target.to_shortname = "None";
2732 dummy_target.to_longname = "None";
2733 dummy_target.to_doc = "";
2734 dummy_target.to_attach = find_default_attach;
2735 dummy_target.to_detach =
2736 (void (*)(struct target_ops *, char *, int))target_ignore;
2737 dummy_target.to_create_inferior = find_default_create_inferior;
2738 dummy_target.to_can_async_p = find_default_can_async_p;
2739 dummy_target.to_is_async_p = find_default_is_async_p;
2740 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2741 dummy_target.to_pid_to_str = dummy_pid_to_str;
2742 dummy_target.to_stratum = dummy_stratum;
2743 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2744 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2745 dummy_target.to_xfer_partial = default_xfer_partial;
2746 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
2747 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
2748 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
2749 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
2750 dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
2751 dummy_target.to_magic = OPS_MAGIC;
2752 }
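
/* The dummy target is pushed by initialize_targets below, so an entry
   at dummy_stratum is always present at the bottom of the target
   stack. */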
2753 \f
2754 static void
2755 debug_to_open (char *args, int from_tty)
2756 {
2757 debug_target.to_open (args, from_tty);
2758
2759 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2760 }
2761
2762 void
2763 target_close (struct target_ops *targ, int quitting)
2764 {
2765 if (targ->to_xclose != NULL)
2766 targ->to_xclose (targ, quitting);
2767 else if (targ->to_close != NULL)
2768 targ->to_close (quitting);
2769
2770 if (targetdebug)
2771 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2772 }
2773
2774 void
2775 target_attach (char *args, int from_tty)
2776 {
2777 struct target_ops *t;
2778 for (t = current_target.beneath; t != NULL; t = t->beneath)
2779 {
2780 if (t->to_attach != NULL)
2781 {
2782 t->to_attach (t, args, from_tty);
2783 if (targetdebug)
2784 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
2785 args, from_tty);
2786 return;
2787 }
2788 }
2789
2790 internal_error (__FILE__, __LINE__,
2791 "could not find a target to attach");
2792 }
2793
2794 int
2795 target_thread_alive (ptid_t ptid)
2796 {
2797 struct target_ops *t;
2798 for (t = current_target.beneath; t != NULL; t = t->beneath)
2799 {
2800 if (t->to_thread_alive != NULL)
2801 {
2802 int retval;
2803
2804 retval = t->to_thread_alive (t, ptid);
2805 if (targetdebug)
2806 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
2807 PIDGET (ptid), retval);
2808
2809 return retval;
2810 }
2811 }
2812
2813 return 0;
2814 }
2815
2816 void
2817 target_find_new_threads (void)
2818 {
2819 struct target_ops *t;
2820 for (t = current_target.beneath; t != NULL; t = t->beneath)
2821 {
2822 if (t->to_find_new_threads != NULL)
2823 {
2824 t->to_find_new_threads (t);
2825 if (targetdebug)
2826 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
2827
2828 return;
2829 }
2830 }
2831 }
2832
2833 static void
2834 debug_to_post_attach (int pid)
2835 {
2836 debug_target.to_post_attach (pid);
2837
2838 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
2839 }
2840
2841 /* Return a pretty printed form of target_waitstatus.
2842 Space for the result is malloc'd, caller must free. */
2843
2844 char *
2845 target_waitstatus_to_string (const struct target_waitstatus *ws)
2846 {
2847 const char *kind_str = "status->kind = ";
2848
2849 switch (ws->kind)
2850 {
2851 case TARGET_WAITKIND_EXITED:
2852 return xstrprintf ("%sexited, status = %d",
2853 kind_str, ws->value.integer);
2854 case TARGET_WAITKIND_STOPPED:
2855 return xstrprintf ("%sstopped, signal = %s",
2856 kind_str, target_signal_to_name (ws->value.sig));
2857 case TARGET_WAITKIND_SIGNALLED:
2858 return xstrprintf ("%ssignalled, signal = %s",
2859 kind_str, target_signal_to_name (ws->value.sig));
2860 case TARGET_WAITKIND_LOADED:
2861 return xstrprintf ("%sloaded", kind_str);
2862 case TARGET_WAITKIND_FORKED:
2863 return xstrprintf ("%sforked", kind_str);
2864 case TARGET_WAITKIND_VFORKED:
2865 return xstrprintf ("%svforked", kind_str);
2866 case TARGET_WAITKIND_EXECD:
2867 return xstrprintf ("%sexecd", kind_str);
2868 case TARGET_WAITKIND_SYSCALL_ENTRY:
2869 return xstrprintf ("%ssyscall-entry", kind_str);
2870 case TARGET_WAITKIND_SYSCALL_RETURN:
2871 return xstrprintf ("%ssyscall-return", kind_str);
2872 case TARGET_WAITKIND_SPURIOUS:
2873 return xstrprintf ("%sspurious", kind_str);
2874 case TARGET_WAITKIND_IGNORE:
2875 return xstrprintf ("%signore", kind_str);
2876 case TARGET_WAITKIND_NO_HISTORY:
2877 return xstrprintf ("%sno-history", kind_str);
2878 default:
2879 return xstrprintf ("%sunknown???", kind_str);
2880 }
2881 }
2882
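/* For instance, a stop for SIGTRAP is rendered as
   "status->kind = stopped, signal = SIGTRAP" in "set debug target"
   output (see target_wait above). */
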
2883 static void
2884 debug_print_register (const char * func,
2885 struct regcache *regcache, int regno)
2886 {
2887 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2888 fprintf_unfiltered (gdb_stdlog, "%s ", func);
2889 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
2890 && gdbarch_register_name (gdbarch, regno) != NULL
2891 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
2892 fprintf_unfiltered (gdb_stdlog, "(%s)",
2893 gdbarch_register_name (gdbarch, regno));
2894 else
2895 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
2896 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
2897 {
2898 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2899 int i, size = register_size (gdbarch, regno);
2900 unsigned char buf[MAX_REGISTER_SIZE];
2901 regcache_raw_collect (regcache, regno, buf);
2902 fprintf_unfiltered (gdb_stdlog, " = ");
2903 for (i = 0; i < size; i++)
2904 {
2905 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
2906 }
2907 if (size <= sizeof (LONGEST))
2908 {
2909 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
2910 fprintf_unfiltered (gdb_stdlog, " %s %s",
2911 core_addr_to_string_nz (val), plongest (val));
2912 }
2913 }
2914 fprintf_unfiltered (gdb_stdlog, "\n");
2915 }
2916
2917 void
2918 target_fetch_registers (struct regcache *regcache, int regno)
2919 {
2920 struct target_ops *t;
2921 for (t = current_target.beneath; t != NULL; t = t->beneath)
2922 {
2923 if (t->to_fetch_registers != NULL)
2924 {
2925 t->to_fetch_registers (t, regcache, regno);
2926 if (targetdebug)
2927 debug_print_register ("target_fetch_registers", regcache, regno);
2928 return;
2929 }
2930 }
2931 }
2932
2933 void
2934 target_store_registers (struct regcache *regcache, int regno)
2935 {
2936
2937 struct target_ops *t;
2938 for (t = current_target.beneath; t != NULL; t = t->beneath)
2939 {
2940 if (t->to_store_registers != NULL)
2941 {
2942 t->to_store_registers (t, regcache, regno);
2943 if (targetdebug)
2944 {
2945 debug_print_register ("target_store_registers", regcache, regno);
2946 }
2947 return;
2948 }
2949 }
2950
2951 noprocess ();
2952 }
2953
2954 static void
2955 debug_to_prepare_to_store (struct regcache *regcache)
2956 {
2957 debug_target.to_prepare_to_store (regcache);
2958
2959 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
2960 }
2961
2962 static int
2963 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
2964 int write, struct mem_attrib *attrib,
2965 struct target_ops *target)
2966 {
2967 int retval;
2968
2969 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
2970 attrib, target);
2971
2972 fprintf_unfiltered (gdb_stdlog,
2973 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
2974 paddress (target_gdbarch, memaddr), len,
2975 write ? "write" : "read", retval);
2976
2977 if (retval > 0)
2978 {
2979 int i;
2980
2981 fputs_unfiltered (", bytes =", gdb_stdlog);
2982 for (i = 0; i < retval; i++)
2983 {
2984 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
2985 {
2986 if (targetdebug < 2 && i > 0)
2987 {
2988 fprintf_unfiltered (gdb_stdlog, " ...");
2989 break;
2990 }
2991 fprintf_unfiltered (gdb_stdlog, "\n");
2992 }
2993
2994 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
2995 }
2996 }
2997
2998 fputc_unfiltered ('\n', gdb_stdlog);
2999
3000 return retval;
3001 }
3002
3003 static void
3004 debug_to_files_info (struct target_ops *target)
3005 {
3006 debug_target.to_files_info (target);
3007
3008 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3009 }
3010
3011 static int
3012 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3013 struct bp_target_info *bp_tgt)
3014 {
3015 int retval;
3016
3017 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3018
3019 fprintf_unfiltered (gdb_stdlog,
3020 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
3021 (unsigned long) bp_tgt->placed_address,
3022 (unsigned long) retval);
3023 return retval;
3024 }
3025
3026 static int
3027 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3028 struct bp_target_info *bp_tgt)
3029 {
3030 int retval;
3031
3032 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3033
3034 fprintf_unfiltered (gdb_stdlog,
3035 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
3036 (unsigned long) bp_tgt->placed_address,
3037 (unsigned long) retval);
3038 return retval;
3039 }
3040
3041 static int
3042 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3043 {
3044 int retval;
3045
3046 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3047
3048 fprintf_unfiltered (gdb_stdlog,
3049 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3050 (unsigned long) type,
3051 (unsigned long) cnt,
3052 (unsigned long) from_tty,
3053 (unsigned long) retval);
3054 return retval;
3055 }
3056
3057 static int
3058 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3059 {
3060 CORE_ADDR retval;
3061
3062 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3063
3064 fprintf_unfiltered (gdb_stdlog,
3065 "target_region_ok_for_hw_watchpoint (%ld, %ld) = 0x%lx\n",
3066 (unsigned long) addr,
3067 (unsigned long) len,
3068 (unsigned long) retval);
3069 return retval;
3070 }
3071
3072 static int
3073 debug_to_stopped_by_watchpoint (void)
3074 {
3075 int retval;
3076
3077 retval = debug_target.to_stopped_by_watchpoint ();
3078
3079 fprintf_unfiltered (gdb_stdlog,
3080 "target_stopped_by_watchpoint () = %ld\n",
3081 (unsigned long) retval);
3082 return retval;
3083 }
3084
3085 static int
3086 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3087 {
3088 int retval;
3089
3090 retval = debug_target.to_stopped_data_address (target, addr);
3091
3092 fprintf_unfiltered (gdb_stdlog,
3093 "target_stopped_data_address ([0x%lx]) = %ld\n",
3094 (unsigned long)*addr,
3095 (unsigned long)retval);
3096 return retval;
3097 }
3098
3099 static int
3100 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3101 CORE_ADDR addr,
3102 CORE_ADDR start, int length)
3103 {
3104 int retval;
3105
3106 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3107 start, length);
3108
3109 fprintf_unfiltered (gdb_stdlog,
3110 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
3111 (unsigned long) addr, (unsigned long) start, length,
3112 retval);
3113 return retval;
3114 }
3115
3116 static int
3117 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3118 struct bp_target_info *bp_tgt)
3119 {
3120 int retval;
3121
3122 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3123
3124 fprintf_unfiltered (gdb_stdlog,
3125 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
3126 (unsigned long) bp_tgt->placed_address,
3127 (unsigned long) retval);
3128 return retval;
3129 }
3130
3131 static int
3132 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3133 struct bp_target_info *bp_tgt)
3134 {
3135 int retval;
3136
3137 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3138
3139 fprintf_unfiltered (gdb_stdlog,
3140 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
3141 (unsigned long) bp_tgt->placed_address,
3142 (unsigned long) retval);
3143 return retval;
3144 }
3145
3146 static int
3147 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
3148 {
3149 int retval;
3150
3151 retval = debug_target.to_insert_watchpoint (addr, len, type);
3152
3153 fprintf_unfiltered (gdb_stdlog,
3154 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
3155 (unsigned long) addr, len, type, (unsigned long) retval);
3156 return retval;
3157 }
3158
3159 static int
3160 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
3161 {
3162 int retval;
3163
3164 retval = debug_target.to_remove_watchpoint (addr, len, type);
3165
3166 fprintf_unfiltered (gdb_stdlog,
3167 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
3168 (unsigned long) addr, len, type, (unsigned long) retval);
3169 return retval;
3170 }
3171
3172 static void
3173 debug_to_terminal_init (void)
3174 {
3175 debug_target.to_terminal_init ();
3176
3177 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
3178 }
3179
3180 static void
3181 debug_to_terminal_inferior (void)
3182 {
3183 debug_target.to_terminal_inferior ();
3184
3185 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
3186 }
3187
3188 static void
3189 debug_to_terminal_ours_for_output (void)
3190 {
3191 debug_target.to_terminal_ours_for_output ();
3192
3193 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
3194 }
3195
3196 static void
3197 debug_to_terminal_ours (void)
3198 {
3199 debug_target.to_terminal_ours ();
3200
3201 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
3202 }
3203
3204 static void
3205 debug_to_terminal_save_ours (void)
3206 {
3207 debug_target.to_terminal_save_ours ();
3208
3209 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
3210 }
3211
3212 static void
3213 debug_to_terminal_info (char *arg, int from_tty)
3214 {
3215 debug_target.to_terminal_info (arg, from_tty);
3216
3217 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
3218 from_tty);
3219 }
3220
3221 static void
3222 debug_to_load (char *args, int from_tty)
3223 {
3224 debug_target.to_load (args, from_tty);
3225
3226 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
3227 }
3228
3229 static int
3230 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
3231 {
3232 int retval;
3233
3234 retval = debug_target.to_lookup_symbol (name, addrp);
3235
3236 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
3237
3238 return retval;
3239 }
3240
3241 static void
3242 debug_to_post_startup_inferior (ptid_t ptid)
3243 {
3244 debug_target.to_post_startup_inferior (ptid);
3245
3246 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
3247 PIDGET (ptid));
3248 }
3249
3250 static void
3251 debug_to_acknowledge_created_inferior (int pid)
3252 {
3253 debug_target.to_acknowledge_created_inferior (pid);
3254
3255 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
3256 pid);
3257 }
3258
3259 static void
3260 debug_to_insert_fork_catchpoint (int pid)
3261 {
3262 debug_target.to_insert_fork_catchpoint (pid);
3263
3264 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
3265 pid);
3266 }
3267
3268 static int
3269 debug_to_remove_fork_catchpoint (int pid)
3270 {
3271 int retval;
3272
3273 retval = debug_target.to_remove_fork_catchpoint (pid);
3274
3275 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3276 pid, retval);
3277
3278 return retval;
3279 }
3280
3281 static void
3282 debug_to_insert_vfork_catchpoint (int pid)
3283 {
3284 debug_target.to_insert_vfork_catchpoint (pid);
3285
3286 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
3287 pid);
3288 }
3289
3290 static int
3291 debug_to_remove_vfork_catchpoint (int pid)
3292 {
3293 int retval;
3294
3295 retval = debug_target.to_remove_vfork_catchpoint (pid);
3296
3297 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3298 pid, retval);
3299
3300 return retval;
3301 }
3302
3303 static void
3304 debug_to_insert_exec_catchpoint (int pid)
3305 {
3306 debug_target.to_insert_exec_catchpoint (pid);
3307
3308 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
3309 pid);
3310 }
3311
3312 static int
3313 debug_to_remove_exec_catchpoint (int pid)
3314 {
3315 int retval;
3316
3317 retval = debug_target.to_remove_exec_catchpoint (pid);
3318
3319 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3320 pid, retval);
3321
3322 return retval;
3323 }
3324
3325 static int
3326 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3327 {
3328 int has_exited;
3329
3330 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3331
3332 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3333 pid, wait_status, *exit_status, has_exited);
3334
3335 return has_exited;
3336 }
3337
3338 static int
3339 debug_to_can_run (void)
3340 {
3341 int retval;
3342
3343 retval = debug_target.to_can_run ();
3344
3345 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3346
3347 return retval;
3348 }
3349
3350 static void
3351 debug_to_notice_signals (ptid_t ptid)
3352 {
3353 debug_target.to_notice_signals (ptid);
3354
3355 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3356 PIDGET (ptid));
3357 }
3358
3359 static struct gdbarch *
3360 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3361 {
3362 struct gdbarch *retval;
3363
3364 retval = debug_target.to_thread_architecture (ops, ptid);
3365
3366 fprintf_unfiltered (gdb_stdlog, "target_thread_architecture (%s) = %p [%s]\n",
3367 target_pid_to_str (ptid), retval,
3368 gdbarch_bfd_arch_info (retval)->printable_name);
3369 return retval;
3370 }
3371
3372 static void
3373 debug_to_stop (ptid_t ptid)
3374 {
3375 debug_target.to_stop (ptid);
3376
3377 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3378 target_pid_to_str (ptid));
3379 }
3380
3381 static void
3382 debug_to_rcmd (char *command,
3383 struct ui_file *outbuf)
3384 {
3385 debug_target.to_rcmd (command, outbuf);
3386 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3387 }
3388
3389 static char *
3390 debug_to_pid_to_exec_file (int pid)
3391 {
3392 char *exec_file;
3393
3394 exec_file = debug_target.to_pid_to_exec_file (pid);
3395
3396 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3397 pid, exec_file);
3398
3399 return exec_file;
3400 }
3401
3402 static void
3403 setup_target_debug (void)
3404 {
3405 memcpy (&debug_target, &current_target, sizeof debug_target);
3406
3407 current_target.to_open = debug_to_open;
3408 current_target.to_post_attach = debug_to_post_attach;
3409 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3410 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3411 current_target.to_files_info = debug_to_files_info;
3412 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3413 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3414 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3415 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3416 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3417 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3418 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3419 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3420 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3421 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3422 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3423 current_target.to_terminal_init = debug_to_terminal_init;
3424 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3425 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3426 current_target.to_terminal_ours = debug_to_terminal_ours;
3427 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3428 current_target.to_terminal_info = debug_to_terminal_info;
3429 current_target.to_load = debug_to_load;
3430 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3431 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3432 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3433 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3434 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3435 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3436 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3437 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3438 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3439 current_target.to_has_exited = debug_to_has_exited;
3440 current_target.to_can_run = debug_to_can_run;
3441 current_target.to_notice_signals = debug_to_notice_signals;
3442 current_target.to_stop = debug_to_stop;
3443 current_target.to_rcmd = debug_to_rcmd;
3444 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3445 current_target.to_thread_architecture = debug_to_thread_architecture;
3446 }
3447 \f
3448
3449 static char targ_desc[] =
3450 "Names of targets and files being debugged.\n\
3451 Shows the entire stack of targets currently in use (including the exec-file,\n\
3452 core-file, and process, if any), as well as the symbol file name.";
3453
3454 static void
3455 do_monitor_command (char *cmd,
3456 int from_tty)
3457 {
3458 if ((current_target.to_rcmd
3459 == (void (*) (char *, struct ui_file *)) tcomplain)
3460 || (current_target.to_rcmd == debug_to_rcmd
3461 && (debug_target.to_rcmd
3462 == (void (*) (char *, struct ui_file *)) tcomplain)))
3463 error (_("\"monitor\" command not supported by this target."));
3464 target_rcmd (cmd, gdb_stdtarg);
3465 }
3466
3467 /* Print the name of each layer of our target stack. */
3468
3469 static void
3470 maintenance_print_target_stack (char *cmd, int from_tty)
3471 {
3472 struct target_ops *t;
3473
3474 printf_filtered (_("The current target stack is:\n"));
3475
3476 for (t = target_stack; t != NULL; t = t->beneath)
3477 {
3478 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3479 }
3480 }
3481
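/* For example, when connected to a remote stub with an exec file
   loaded, the output might look like (illustrative):

     The current target stack is:
       - remote (Remote serial target in gdb-specific protocol)
       - exec (Local exec file)
       - None (None)
 */
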
3482 /* Controls if async mode is permitted. */
3483 int target_async_permitted = 0;
3484
3485 /* The set command writes to this variable. If the inferior is
3486 executing, target_async_permitted is *not* updated. */
3487 static int target_async_permitted_1 = 0;
3488
3489 static void
3490 set_maintenance_target_async_permitted (char *args, int from_tty,
3491 struct cmd_list_element *c)
3492 {
3493 if (have_live_inferiors ())
3494 {
3495 target_async_permitted_1 = target_async_permitted;
3496 error (_("Cannot change this setting while the inferior is running."));
3497 }
3498
3499 target_async_permitted = target_async_permitted_1;
3500 }
3501
3502 static void
3503 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3504 struct cmd_list_element *c,
3505 const char *value)
3506 {
3507 fprintf_filtered (file, _("\
3508 Controlling the inferior in asynchronous mode is %s.\n"), value);
3509 }
3510
3511 void
3512 initialize_targets (void)
3513 {
3514 init_dummy_target ();
3515 push_target (&dummy_target);
3516
3517 add_info ("target", target_info, targ_desc);
3518 add_info ("files", target_info, targ_desc);
3519
3520 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3521 Set target debugging."), _("\
3522 Show target debugging."), _("\
3523 When non-zero, target debugging is enabled. Higher numbers are more\n\
3524 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3525 command."),
3526 NULL,
3527 show_targetdebug,
3528 &setdebuglist, &showdebuglist);
3529
3530 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3531 &trust_readonly, _("\
3532 Set mode for reading from readonly sections."), _("\
3533 Show mode for reading from readonly sections."), _("\
3534 When this mode is on, memory reads from readonly sections (such as .text)\n\
3535 will be read from the object file instead of from the target. This will\n\
3536 result in significant performance improvement for remote targets."),
3537 NULL,
3538 show_trust_readonly,
3539 &setlist, &showlist);
3540
3541 add_com ("monitor", class_obscure, do_monitor_command,
3542 _("Send a command to the remote monitor (remote targets only)."));
3543
3544 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3545 _("Print the name of each layer of the internal target stack."),
3546 &maintenanceprintlist);
3547
3548 add_setshow_boolean_cmd ("target-async", no_class,
3549 &target_async_permitted_1, _("\
3550 Set whether gdb controls the inferior in asynchronous mode."), _("\
3551 Show whether gdb controls the inferior in asynchronous mode."), _("\
3552 Tells gdb whether to control the inferior in asynchronous mode."),
3553 set_maintenance_target_async_permitted,
3554 show_maintenance_target_async_permitted,
3555 &setlist,
3556 &showlist);
3557
3558 add_setshow_boolean_cmd ("stack-cache", class_support,
3559 &stack_cache_enabled_p_1, _("\
3560 Set cache use for stack access."), _("\
3561 Show cache use for stack access."), _("\
3562 When on, use the data cache for all stack access, regardless of any\n\
3563 configured memory regions. This improves remote performance significantly.\n\
3564 By default, caching for stack access is on."),
3565 set_stack_cache_enabled_p,
3566 show_stack_cache_enabled_p,
3567 &setlist, &showlist);
3568
3569 target_dcache = dcache_init ();
3570 }