Remove the attach_flag global, and make it per-inferior.
gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44
45 static void target_info (char *, int);
46
47 static void kill_or_be_killed (int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static int nosymbol (char *, CORE_ADDR *);
57
58 static void tcomplain (void) ATTR_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 void target_ignore (void);
69
70 static void target_command (char *, int);
71
72 static struct target_ops *find_default_run_target (char *);
73
74 static void nosupport_runtime (void);
75
76 static LONGEST default_xfer_partial (struct target_ops *ops,
77 enum target_object object,
78 const char *annex, gdb_byte *readbuf,
79 const gdb_byte *writebuf,
80 ULONGEST offset, LONGEST len);
81
82 static LONGEST current_xfer_partial (struct target_ops *ops,
83 enum target_object object,
84 const char *annex, gdb_byte *readbuf,
85 const gdb_byte *writebuf,
86 ULONGEST offset, LONGEST len);
87
88 static LONGEST target_xfer_partial (struct target_ops *ops,
89 enum target_object object,
90 const char *annex,
91 void *readbuf, const void *writebuf,
92 ULONGEST offset, LONGEST len);
93
94 static void init_dummy_target (void);
95
96 static struct target_ops debug_target;
97
98 static void debug_to_open (char *, int);
99
100 static void debug_to_close (int);
101
102 static void debug_to_attach (char *, int);
103
104 static void debug_to_detach (char *, int);
105
106 static void debug_to_resume (ptid_t, int, enum target_signal);
107
108 static ptid_t debug_to_wait (ptid_t, struct target_waitstatus *);
109
110 static void debug_to_fetch_registers (struct regcache *, int);
111
112 static void debug_to_store_registers (struct regcache *, int);
113
114 static void debug_to_prepare_to_store (struct regcache *);
115
116 static void debug_to_files_info (struct target_ops *);
117
118 static int debug_to_insert_breakpoint (struct bp_target_info *);
119
120 static int debug_to_remove_breakpoint (struct bp_target_info *);
121
122 static int debug_to_can_use_hw_breakpoint (int, int, int);
123
124 static int debug_to_insert_hw_breakpoint (struct bp_target_info *);
125
126 static int debug_to_remove_hw_breakpoint (struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
129
130 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
131
132 static int debug_to_stopped_by_watchpoint (void);
133
134 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
135
136 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
137 CORE_ADDR, CORE_ADDR, int);
138
139 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
140
141 static void debug_to_terminal_init (void);
142
143 static void debug_to_terminal_inferior (void);
144
145 static void debug_to_terminal_ours_for_output (void);
146
147 static void debug_to_terminal_save_ours (void);
148
149 static void debug_to_terminal_ours (void);
150
151 static void debug_to_terminal_info (char *, int);
152
153 static void debug_to_kill (void);
154
155 static void debug_to_load (char *, int);
156
157 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
158
159 static void debug_to_mourn_inferior (void);
160
161 static int debug_to_can_run (void);
162
163 static void debug_to_notice_signals (ptid_t);
164
165 static int debug_to_thread_alive (ptid_t);
166
167 static void debug_to_stop (ptid_t);
168
169 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
170 weird and mysterious ways. Putting the variable here lets those
171 weird and mysterious ways keep building while they are being
172 converted to the inferior inheritance structure. */
173 struct target_ops deprecated_child_ops;
174
175 /* Pointer to array of target architecture structures; the size of the
176 array; the current index into the array; the allocated size of the
177 array. */
178 struct target_ops **target_structs;
179 unsigned target_struct_size;
180 unsigned target_struct_index;
181 unsigned target_struct_allocsize;
182 #define DEFAULT_ALLOCSIZE 10
183
184 /* The initial current target, so that there is always a semi-valid
185 current target. */
186
187 static struct target_ops dummy_target;
188
189 /* Top of target stack. */
190
191 static struct target_ops *target_stack;
192
193 /* The target structure we are currently using to talk to a process
194 or file or whatever "inferior" we have. */
195
196 struct target_ops current_target;
197
198 /* Command list for target. */
199
200 static struct cmd_list_element *targetlist = NULL;
201
202 /* Nonzero if we should trust readonly sections from the
203 executable when reading memory. */
204
205 static int trust_readonly = 0;
206
207 /* Nonzero if we should show true memory content including
208 memory breakpoints inserted by GDB. */
209
210 static int show_memory_breakpoints = 0;
211
212 /* Non-zero if we want to see trace of target level stuff. */
213
214 static int targetdebug = 0;
215 static void
216 show_targetdebug (struct ui_file *file, int from_tty,
217 struct cmd_list_element *c, const char *value)
218 {
219 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
220 }
221
222 static void setup_target_debug (void);
223
224 DCACHE *target_dcache;
225
226 /* The user just typed 'target' without the name of a target. */
227
228 static void
229 target_command (char *arg, int from_tty)
230 {
231 fputs_filtered ("Argument required (target name). Try `help target'\n",
232 gdb_stdout);
233 }
234
235 /* Add a possible target architecture to the list. */
236
237 void
238 add_target (struct target_ops *t)
239 {
240 /* Provide default values for all "must have" methods. */
241 if (t->to_xfer_partial == NULL)
242 t->to_xfer_partial = default_xfer_partial;
243
244 if (!target_structs)
245 {
246 target_struct_allocsize = DEFAULT_ALLOCSIZE;
247 target_structs = (struct target_ops **) xmalloc
248 (target_struct_allocsize * sizeof (*target_structs));
249 }
250 if (target_struct_size >= target_struct_allocsize)
251 {
252 target_struct_allocsize *= 2;
253 target_structs = (struct target_ops **)
254 xrealloc ((char *) target_structs,
255 target_struct_allocsize * sizeof (*target_structs));
256 }
257 target_structs[target_struct_size++] = t;
258
259 if (targetlist == NULL)
260 add_prefix_cmd ("target", class_run, target_command, _("\
261 Connect to a target machine or process.\n\
262 The first argument is the type or protocol of the target machine.\n\
263 Remaining arguments are interpreted by the target protocol. For more\n\
264 information on the arguments for a particular protocol, type\n\
265 `help target ' followed by the protocol name."),
266 &targetlist, "target ", 0, &cmdlist);
267 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
268 }
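/* Editor's note: the following is an illustrative sketch, not part of
   the original file.  It shows how a backend typically fills in a
   target_ops and registers it with add_target; the "sketch" target,
   its open routine and its choice of stratum are hypothetical, while
   the fields and helpers used (to_shortname, to_open, to_magic,
   push_target, ...) are taken from this file's interface.  */
#if 0
static struct target_ops sketch_ops;

static void
sketch_open (char *args, int from_tty)
{
  /* A real open routine would parse ARGS and establish the
     connection before pushing the target.  */
  push_target (&sketch_ops);
}

static void
init_sketch_ops (void)
{
  sketch_ops.to_shortname = "sketch";
  sketch_ops.to_longname = "Illustrative example target";
  sketch_ops.to_doc = "Example target, for illustration only.";
  sketch_ops.to_open = sketch_open;
  sketch_ops.to_stratum = process_stratum;
  sketch_ops.to_magic = OPS_MAGIC;
  add_target (&sketch_ops);
}
#endif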
269
270 /* Stub functions */
271
272 void
273 target_ignore (void)
274 {
275 }
276
277 void
278 target_load (char *arg, int from_tty)
279 {
280 dcache_invalidate (target_dcache);
281 (*current_target.to_load) (arg, from_tty);
282 }
283
284 static int
285 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
286 struct target_ops *t)
287 {
288 errno = EIO; /* Can't read/write this location */
289 return 0; /* No bytes handled */
290 }
291
292 static void
293 tcomplain (void)
294 {
295 error (_("You can't do that when your target is `%s'"),
296 current_target.to_shortname);
297 }
298
299 void
300 noprocess (void)
301 {
302 error (_("You can't do that without a process to debug."));
303 }
304
305 static int
306 nosymbol (char *name, CORE_ADDR *addrp)
307 {
308 return 1; /* Symbol does not exist in target env */
309 }
310
311 static void
312 nosupport_runtime (void)
313 {
314 if (ptid_equal (inferior_ptid, null_ptid))
315 noprocess ();
316 else
317 error (_("No run-time support for this"));
318 }
319
320
321 static void
322 default_terminal_info (char *args, int from_tty)
323 {
324 printf_unfiltered (_("No saved terminal information.\n"));
325 }
326
327 /* This is the default target_create_inferior and target_attach function.
328 If the current target is executing, it asks whether to kill it off.
329 If this function returns without calling error(), it has killed off
330 the target, and the operation should be attempted. */
331
332 static void
333 kill_or_be_killed (int from_tty)
334 {
335 if (target_has_execution)
336 {
337 printf_unfiltered (_("You are already running a program:\n"));
338 target_files_info ();
339 if (query ("Kill it? "))
340 {
341 target_kill ();
342 if (target_has_execution)
343 error (_("Killing the program did not help."));
344 return;
345 }
346 else
347 {
348 error (_("Program not killed."));
349 }
350 }
351 tcomplain ();
352 }
353
354 /* Go through the target stack from top to bottom, copying over zero
355 entries in current_target, then filling in still empty entries. In
356 effect, we are doing class inheritance through the pushed target
357 vectors.
358
359 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
360 is currently implemented, is that it discards any knowledge of
361 which target an inherited method originally belonged to.
362 Consequently, new target methods should instead explicitly and
363 locally search the target stack for the target that can handle the
364 request. */
365
366 static void
367 update_current_target (void)
368 {
369 struct target_ops *t;
370
371 /* First, reset current's contents. */
372 memset (&current_target, 0, sizeof (current_target));
373
374 #define INHERIT(FIELD, TARGET) \
375 if (!current_target.FIELD) \
376 current_target.FIELD = (TARGET)->FIELD
377
378 for (t = target_stack; t; t = t->beneath)
379 {
380 INHERIT (to_shortname, t);
381 INHERIT (to_longname, t);
382 INHERIT (to_doc, t);
383 /* Do not inherit to_open. */
384 /* Do not inherit to_close. */
385 INHERIT (to_attach, t);
386 INHERIT (to_post_attach, t);
387 INHERIT (to_attach_no_wait, t);
388 INHERIT (to_detach, t);
389 /* Do not inherit to_disconnect. */
390 INHERIT (to_resume, t);
391 INHERIT (to_wait, t);
392 INHERIT (to_fetch_registers, t);
393 INHERIT (to_store_registers, t);
394 INHERIT (to_prepare_to_store, t);
395 INHERIT (deprecated_xfer_memory, t);
396 INHERIT (to_files_info, t);
397 INHERIT (to_insert_breakpoint, t);
398 INHERIT (to_remove_breakpoint, t);
399 INHERIT (to_can_use_hw_breakpoint, t);
400 INHERIT (to_insert_hw_breakpoint, t);
401 INHERIT (to_remove_hw_breakpoint, t);
402 INHERIT (to_insert_watchpoint, t);
403 INHERIT (to_remove_watchpoint, t);
404 INHERIT (to_stopped_data_address, t);
405 INHERIT (to_have_steppable_watchpoint, t);
406 INHERIT (to_have_continuable_watchpoint, t);
407 INHERIT (to_stopped_by_watchpoint, t);
408 INHERIT (to_watchpoint_addr_within_range, t);
409 INHERIT (to_region_ok_for_hw_watchpoint, t);
410 INHERIT (to_terminal_init, t);
411 INHERIT (to_terminal_inferior, t);
412 INHERIT (to_terminal_ours_for_output, t);
413 INHERIT (to_terminal_ours, t);
414 INHERIT (to_terminal_save_ours, t);
415 INHERIT (to_terminal_info, t);
416 INHERIT (to_kill, t);
417 INHERIT (to_load, t);
418 INHERIT (to_lookup_symbol, t);
419 INHERIT (to_create_inferior, t);
420 INHERIT (to_post_startup_inferior, t);
421 INHERIT (to_acknowledge_created_inferior, t);
422 INHERIT (to_insert_fork_catchpoint, t);
423 INHERIT (to_remove_fork_catchpoint, t);
424 INHERIT (to_insert_vfork_catchpoint, t);
425 INHERIT (to_remove_vfork_catchpoint, t);
426 /* Do not inherit to_follow_fork. */
427 INHERIT (to_insert_exec_catchpoint, t);
428 INHERIT (to_remove_exec_catchpoint, t);
429 INHERIT (to_has_exited, t);
430 INHERIT (to_mourn_inferior, t);
431 INHERIT (to_can_run, t);
432 INHERIT (to_notice_signals, t);
433 INHERIT (to_thread_alive, t);
434 INHERIT (to_find_new_threads, t);
435 INHERIT (to_pid_to_str, t);
436 INHERIT (to_extra_thread_info, t);
437 INHERIT (to_stop, t);
438 /* Do not inherit to_xfer_partial. */
439 INHERIT (to_rcmd, t);
440 INHERIT (to_pid_to_exec_file, t);
441 INHERIT (to_log_command, t);
442 INHERIT (to_stratum, t);
443 INHERIT (to_has_all_memory, t);
444 INHERIT (to_has_memory, t);
445 INHERIT (to_has_stack, t);
446 INHERIT (to_has_registers, t);
447 INHERIT (to_has_execution, t);
448 INHERIT (to_has_thread_control, t);
449 INHERIT (to_sections, t);
450 INHERIT (to_sections_end, t);
451 INHERIT (to_can_async_p, t);
452 INHERIT (to_is_async_p, t);
453 INHERIT (to_async, t);
454 INHERIT (to_async_mask, t);
455 INHERIT (to_find_memory_regions, t);
456 INHERIT (to_make_corefile_notes, t);
457 INHERIT (to_get_thread_local_address, t);
458 /* Do not inherit to_read_description. */
459 /* Do not inherit to_search_memory. */
460 INHERIT (to_magic, t);
461 /* Do not inherit to_memory_map. */
462 /* Do not inherit to_flash_erase. */
463 /* Do not inherit to_flash_done. */
464 }
465 #undef INHERIT
466
467 /* Clean up a target struct so it no longer has any zero pointers in
468 it. Some entries are defaulted to a method that prints an error,
469 others are hard-wired to a standard recursive default. */
470
471 #define de_fault(field, value) \
472 if (!current_target.field) \
473 current_target.field = value
474
475 de_fault (to_open,
476 (void (*) (char *, int))
477 tcomplain);
478 de_fault (to_close,
479 (void (*) (int))
480 target_ignore);
481 de_fault (to_post_attach,
482 (void (*) (int))
483 target_ignore);
484 de_fault (to_detach,
485 (void (*) (char *, int))
486 target_ignore);
487 de_fault (to_resume,
488 (void (*) (ptid_t, int, enum target_signal))
489 noprocess);
490 de_fault (to_wait,
491 (ptid_t (*) (ptid_t, struct target_waitstatus *))
492 noprocess);
493 de_fault (to_fetch_registers,
494 (void (*) (struct regcache *, int))
495 target_ignore);
496 de_fault (to_store_registers,
497 (void (*) (struct regcache *, int))
498 noprocess);
499 de_fault (to_prepare_to_store,
500 (void (*) (struct regcache *))
501 noprocess);
502 de_fault (deprecated_xfer_memory,
503 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
504 nomemory);
505 de_fault (to_files_info,
506 (void (*) (struct target_ops *))
507 target_ignore);
508 de_fault (to_insert_breakpoint,
509 memory_insert_breakpoint);
510 de_fault (to_remove_breakpoint,
511 memory_remove_breakpoint);
512 de_fault (to_can_use_hw_breakpoint,
513 (int (*) (int, int, int))
514 return_zero);
515 de_fault (to_insert_hw_breakpoint,
516 (int (*) (struct bp_target_info *))
517 return_minus_one);
518 de_fault (to_remove_hw_breakpoint,
519 (int (*) (struct bp_target_info *))
520 return_minus_one);
521 de_fault (to_insert_watchpoint,
522 (int (*) (CORE_ADDR, int, int))
523 return_minus_one);
524 de_fault (to_remove_watchpoint,
525 (int (*) (CORE_ADDR, int, int))
526 return_minus_one);
527 de_fault (to_stopped_by_watchpoint,
528 (int (*) (void))
529 return_zero);
530 de_fault (to_stopped_data_address,
531 (int (*) (struct target_ops *, CORE_ADDR *))
532 return_zero);
533 de_fault (to_watchpoint_addr_within_range,
534 default_watchpoint_addr_within_range);
535 de_fault (to_region_ok_for_hw_watchpoint,
536 default_region_ok_for_hw_watchpoint);
537 de_fault (to_terminal_init,
538 (void (*) (void))
539 target_ignore);
540 de_fault (to_terminal_inferior,
541 (void (*) (void))
542 target_ignore);
543 de_fault (to_terminal_ours_for_output,
544 (void (*) (void))
545 target_ignore);
546 de_fault (to_terminal_ours,
547 (void (*) (void))
548 target_ignore);
549 de_fault (to_terminal_save_ours,
550 (void (*) (void))
551 target_ignore);
552 de_fault (to_terminal_info,
553 default_terminal_info);
554 de_fault (to_kill,
555 (void (*) (void))
556 noprocess);
557 de_fault (to_load,
558 (void (*) (char *, int))
559 tcomplain);
560 de_fault (to_lookup_symbol,
561 (int (*) (char *, CORE_ADDR *))
562 nosymbol);
563 de_fault (to_post_startup_inferior,
564 (void (*) (ptid_t))
565 target_ignore);
566 de_fault (to_acknowledge_created_inferior,
567 (void (*) (int))
568 target_ignore);
569 de_fault (to_insert_fork_catchpoint,
570 (void (*) (int))
571 tcomplain);
572 de_fault (to_remove_fork_catchpoint,
573 (int (*) (int))
574 tcomplain);
575 de_fault (to_insert_vfork_catchpoint,
576 (void (*) (int))
577 tcomplain);
578 de_fault (to_remove_vfork_catchpoint,
579 (int (*) (int))
580 tcomplain);
581 de_fault (to_insert_exec_catchpoint,
582 (void (*) (int))
583 tcomplain);
584 de_fault (to_remove_exec_catchpoint,
585 (int (*) (int))
586 tcomplain);
587 de_fault (to_has_exited,
588 (int (*) (int, int, int *))
589 return_zero);
590 de_fault (to_mourn_inferior,
591 (void (*) (void))
592 noprocess);
593 de_fault (to_can_run,
594 return_zero);
595 de_fault (to_notice_signals,
596 (void (*) (ptid_t))
597 target_ignore);
598 de_fault (to_thread_alive,
599 (int (*) (ptid_t))
600 return_zero);
601 de_fault (to_find_new_threads,
602 (void (*) (void))
603 target_ignore);
604 de_fault (to_extra_thread_info,
605 (char *(*) (struct thread_info *))
606 return_zero);
607 de_fault (to_stop,
608 (void (*) (ptid_t))
609 target_ignore);
610 current_target.to_xfer_partial = current_xfer_partial;
611 de_fault (to_rcmd,
612 (void (*) (char *, struct ui_file *))
613 tcomplain);
614 de_fault (to_pid_to_exec_file,
615 (char *(*) (int))
616 return_zero);
617 de_fault (to_async,
618 (void (*) (void (*) (enum inferior_event_type, void*), void*))
619 tcomplain);
620 de_fault (to_async_mask,
621 (int (*) (int))
622 return_one);
623 current_target.to_read_description = NULL;
624 #undef de_fault
625
626 /* Finally, position the target-stack beneath the squashed
627 "current_target". That way code looking for a non-inherited
628 target method can quickly and simply find it. */
629 current_target.beneath = target_stack;
630
631 if (targetdebug)
632 setup_target_debug ();
633 }
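/* Editor's note: an illustrative expansion of the INHERIT/de_fault
   machinery above for a single method, assuming only the bottom
   target on the stack implements to_files_info:

     for (t = target_stack; t; t = t->beneath)
       if (!current_target.to_files_info)
         current_target.to_files_info = t->to_files_info;

     if (!current_target.to_files_info)
       current_target.to_files_info
         = (void (*) (struct target_ops *)) target_ignore;

   That is, the topmost pushed target providing a method wins, and any
   method still unset afterwards is given a safe default.  */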
634
635 /* Mark OPS as a running target. This reverses the effect
636 of target_mark_exited. */
637
638 void
639 target_mark_running (struct target_ops *ops)
640 {
641 struct target_ops *t;
642
643 for (t = target_stack; t != NULL; t = t->beneath)
644 if (t == ops)
645 break;
646 if (t == NULL)
647 internal_error (__FILE__, __LINE__,
648 "Attempted to mark unpushed target \"%s\" as running",
649 ops->to_shortname);
650
651 ops->to_has_execution = 1;
652 ops->to_has_all_memory = 1;
653 ops->to_has_memory = 1;
654 ops->to_has_stack = 1;
655 ops->to_has_registers = 1;
656
657 update_current_target ();
658 }
659
660 /* Mark OPS as a non-running target. This reverses the effect
661 of target_mark_running. */
662
663 void
664 target_mark_exited (struct target_ops *ops)
665 {
666 struct target_ops *t;
667
668 for (t = target_stack; t != NULL; t = t->beneath)
669 if (t == ops)
670 break;
671 if (t == NULL)
672 internal_error (__FILE__, __LINE__,
673 "Attempted to mark unpushed target \"%s\" as running",
674 ops->to_shortname);
675
676 ops->to_has_execution = 0;
677 ops->to_has_all_memory = 0;
678 ops->to_has_memory = 0;
679 ops->to_has_stack = 0;
680 ops->to_has_registers = 0;
681
682 update_current_target ();
683 }
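/* Editor's note: a hypothetical use of the two functions above.  A
   backend would typically mark itself running once an inferior
   exists, and mark itself exited when the process goes away, e.g.
   from its create_inferior and mourn methods.  The "sketch" target
   and these functions are illustrative only.  */
#if 0
static void
sketch_create_inferior (char *exec_file, char *args, char **env, int from_tty)
{
  /* ... start the new process ... */
  target_mark_running (&sketch_ops);
}

static void
sketch_mourn_inferior (void)
{
  target_mark_exited (&sketch_ops);
  /* ... remaining mourning work ... */
}
#endif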
684
685 /* Push a new target type into the stack of the existing target accessors,
686 possibly superseding some of the existing accessors.
687
688 Result is zero if the pushed target ended up on top of the stack,
689 nonzero if at least one target is on top of it.
690
691 Rather than allow an empty stack, we always have the dummy target at
692 the bottom stratum, so we can call the function vectors without
693 checking them. */
694
695 int
696 push_target (struct target_ops *t)
697 {
698 struct target_ops **cur;
699
700 /* Check magic number. If wrong, it probably means someone changed
701 the struct definition, but not all the places that initialize one. */
702 if (t->to_magic != OPS_MAGIC)
703 {
704 fprintf_unfiltered (gdb_stderr,
705 "Magic number of %s target struct wrong\n",
706 t->to_shortname);
707 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
708 }
709
710 /* Find the proper stratum to install this target in. */
711 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
712 {
713 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
714 break;
715 }
716
717 /* If there are already targets at this stratum, remove them. */
718 /* FIXME: cagney/2003-10-15: I think this should be popping all
719 targets to CUR, and not just those at this stratum level. */
720 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
721 {
722 /* There's already something at this stratum level. Close it,
723 and un-hook it from the stack. */
724 struct target_ops *tmp = (*cur);
725 (*cur) = (*cur)->beneath;
726 tmp->beneath = NULL;
727 target_close (tmp, 0);
728 }
729
730 /* We have removed all targets in our stratum, now add the new one. */
731 t->beneath = (*cur);
732 (*cur) = t;
733
734 update_current_target ();
735
736 /* Not on top? */
737 return (t != target_stack);
738 }
739
740 /* Remove a target_ops vector from the stack, wherever it may be.
741 Return how many times it was removed (0 or 1). */
742
743 int
744 unpush_target (struct target_ops *t)
745 {
746 struct target_ops **cur;
747 struct target_ops *tmp;
748
749 /* Look for the specified target. Note that we assume that a target
750 can only occur once in the target stack. */
751
752 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
753 {
754 if ((*cur) == t)
755 break;
756 }
757
758 if ((*cur) == NULL)
759 return 0; /* Didn't find target_ops, quit now */
760
761 /* NOTE: cagney/2003-12-06: In '94 the close call was made
762 unconditional by moving it to before the above check that the
763 target was in the target stack (something about "Change the way
764 pushing and popping of targets work to support target overlays
765 and inheritance"). This doesn't make much sense - only open
766 targets should be closed. */
767 target_close (t, 0);
768
769 /* Unchain the target */
770 tmp = (*cur);
771 (*cur) = (*cur)->beneath;
772 tmp->beneath = NULL;
773
774 update_current_target ();
775
776 return 1;
777 }
778
779 void
780 pop_target (void)
781 {
782 target_close (target_stack, 0); /* Let it clean up */
783 if (unpush_target (target_stack) == 1)
784 return;
785
786 fprintf_unfiltered (gdb_stderr,
787 "pop_target couldn't find target %s\n",
788 current_target.to_shortname);
789 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
790 }
791
792 void
793 pop_all_targets_above (enum strata above_stratum, int quitting)
794 {
795 while ((int) (current_target.to_stratum) > (int) above_stratum)
796 {
797 target_close (target_stack, quitting);
798 if (!unpush_target (target_stack))
799 {
800 fprintf_unfiltered (gdb_stderr,
801 "pop_all_targets couldn't find target %s\n",
802 target_stack->to_shortname);
803 internal_error (__FILE__, __LINE__,
804 _("failed internal consistency check"));
805 break;
806 }
807 }
808 }
809
810 void
811 pop_all_targets (int quitting)
812 {
813 pop_all_targets_above (dummy_stratum, quitting);
814 }
815
816 /* Using the objfile specified in OBJFILE, find the address for the
817 current thread's thread-local storage with offset OFFSET. */
818 CORE_ADDR
819 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
820 {
821 volatile CORE_ADDR addr = 0;
822
823 if (target_get_thread_local_address_p ()
824 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
825 {
826 ptid_t ptid = inferior_ptid;
827 volatile struct gdb_exception ex;
828
829 TRY_CATCH (ex, RETURN_MASK_ALL)
830 {
831 CORE_ADDR lm_addr;
832
833 /* Fetch the load module address for this objfile. */
834 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
835 objfile);
836 /* If it's 0, throw the appropriate exception. */
837 if (lm_addr == 0)
838 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
839 _("TLS load module not found"));
840
841 addr = target_get_thread_local_address (ptid, lm_addr, offset);
842 }
843 /* If an error occurred, print TLS related messages here. Otherwise,
844 throw the error to some higher catcher. */
845 if (ex.reason < 0)
846 {
847 int objfile_is_library = (objfile->flags & OBJF_SHARED);
848
849 switch (ex.error)
850 {
851 case TLS_NO_LIBRARY_SUPPORT_ERROR:
852 error (_("Cannot find thread-local variables in this thread library."));
853 break;
854 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
855 if (objfile_is_library)
856 error (_("Cannot find shared library `%s' in dynamic"
857 " linker's load module list"), objfile->name);
858 else
859 error (_("Cannot find executable file `%s' in dynamic"
860 " linker's load module list"), objfile->name);
861 break;
862 case TLS_NOT_ALLOCATED_YET_ERROR:
863 if (objfile_is_library)
864 error (_("The inferior has not yet allocated storage for"
865 " thread-local variables in\n"
866 "the shared library `%s'\n"
867 "for %s"),
868 objfile->name, target_pid_to_str (ptid));
869 else
870 error (_("The inferior has not yet allocated storage for"
871 " thread-local variables in\n"
872 "the executable `%s'\n"
873 "for %s"),
874 objfile->name, target_pid_to_str (ptid));
875 break;
876 case TLS_GENERIC_ERROR:
877 if (objfile_is_library)
878 error (_("Cannot find thread-local storage for %s, "
879 "shared library %s:\n%s"),
880 target_pid_to_str (ptid),
881 objfile->name, ex.message);
882 else
883 error (_("Cannot find thread-local storage for %s, "
884 "executable file %s:\n%s"),
885 target_pid_to_str (ptid),
886 objfile->name, ex.message);
887 break;
888 default:
889 throw_exception (ex);
890 break;
891 }
892 }
893 }
894 /* It wouldn't be wrong here to try a gdbarch method, too; finding
895 TLS is an ABI-specific thing. But we don't do that yet. */
896 else
897 error (_("Cannot find thread-local variables on this target"));
898
899 return addr;
900 }
901
902 #undef MIN
903 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
904
905 /* target_read_string -- read a null-terminated string, up to LEN bytes,
906 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
907 Set *STRING to a pointer to malloc'd memory containing the data; the caller
908 is responsible for freeing it. Return the number of bytes successfully
909 read. */
910
911 int
912 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
913 {
914 int tlen, origlen, offset, i;
915 gdb_byte buf[4];
916 int errcode = 0;
917 char *buffer;
918 int buffer_allocated;
919 char *bufptr;
920 unsigned int nbytes_read = 0;
921
922 gdb_assert (string);
923
924 /* Small for testing. */
925 buffer_allocated = 4;
926 buffer = xmalloc (buffer_allocated);
927 bufptr = buffer;
928
929 origlen = len;
930
931 while (len > 0)
932 {
933 tlen = MIN (len, 4 - (memaddr & 3));
934 offset = memaddr & 3;
935
936 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
937 if (errcode != 0)
938 {
939 /* The transfer request might have crossed the boundary to an
940 unallocated region of memory. Retry the transfer, requesting
941 a single byte. */
942 tlen = 1;
943 offset = 0;
944 errcode = target_read_memory (memaddr, buf, 1);
945 if (errcode != 0)
946 goto done;
947 }
948
949 if (bufptr - buffer + tlen > buffer_allocated)
950 {
951 unsigned int bytes;
952 bytes = bufptr - buffer;
953 buffer_allocated *= 2;
954 buffer = xrealloc (buffer, buffer_allocated);
955 bufptr = buffer + bytes;
956 }
957
958 for (i = 0; i < tlen; i++)
959 {
960 *bufptr++ = buf[i + offset];
961 if (buf[i + offset] == '\000')
962 {
963 nbytes_read += i + 1;
964 goto done;
965 }
966 }
967
968 memaddr += tlen;
969 len -= tlen;
970 nbytes_read += tlen;
971 }
972 done:
973 *string = buffer;
974 if (errnop != NULL)
975 *errnop = errcode;
976 return nbytes_read;
977 }
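/* Editor's note: a hypothetical caller of target_read_string, showing
   the ownership convention documented above (the caller frees the
   returned buffer).  STR_ADDR and the surrounding function are
   assumptions for illustration.  */
#if 0
static void
sketch_print_target_string (CORE_ADDR str_addr)
{
  char *str;
  int errcode;
  int nbytes = target_read_string (str_addr, &str, 200, &errcode);

  if (errcode == 0)
    printf_unfiltered ("read %d bytes: %s\n", nbytes, str);
  xfree (str);
}
#endif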
978
979 /* Find a section containing ADDR. */
980 struct section_table *
981 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
982 {
983 struct section_table *secp;
984 for (secp = target->to_sections;
985 secp < target->to_sections_end;
986 secp++)
987 {
988 if (addr >= secp->addr && addr < secp->endaddr)
989 return secp;
990 }
991 return NULL;
992 }
993
994 /* Perform a partial memory transfer. The arguments and return
995 value are just as for target_xfer_partial. */
996
997 static LONGEST
998 memory_xfer_partial (struct target_ops *ops, void *readbuf, const void *writebuf,
999 ULONGEST memaddr, LONGEST len)
1000 {
1001 LONGEST res;
1002 int reg_len;
1003 struct mem_region *region;
1004
1005 /* Zero length requests are ok and require no work. */
1006 if (len == 0)
1007 return 0;
1008
1009 /* Try the executable file, if "trust-readonly-sections" is set. */
1010 if (readbuf != NULL && trust_readonly)
1011 {
1012 struct section_table *secp;
1013
1014 secp = target_section_by_addr (ops, memaddr);
1015 if (secp != NULL
1016 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1017 & SEC_READONLY))
1018 return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
1019 }
1020
1021 /* Likewise for accesses to unmapped overlay sections. */
1022 if (readbuf != NULL && overlay_debugging)
1023 {
1024 struct obj_section *section = find_pc_overlay (memaddr);
1025 if (pc_in_unmapped_range (memaddr, section))
1026 return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
1027 }
1028
1029 /* Try GDB's internal data cache. */
1030 region = lookup_mem_region (memaddr);
1031 /* region->hi == 0 means there's no upper bound. */
1032 if (memaddr + len < region->hi || region->hi == 0)
1033 reg_len = len;
1034 else
1035 reg_len = region->hi - memaddr;
1036
1037 switch (region->attrib.mode)
1038 {
1039 case MEM_RO:
1040 if (writebuf != NULL)
1041 return -1;
1042 break;
1043
1044 case MEM_WO:
1045 if (readbuf != NULL)
1046 return -1;
1047 break;
1048
1049 case MEM_FLASH:
1050 /* We only support writing to flash during "load" for now. */
1051 if (writebuf != NULL)
1052 error (_("Writing to flash memory forbidden in this context"));
1053 break;
1054
1055 case MEM_NONE:
1056 return -1;
1057 }
1058
1059 if (region->attrib.cache)
1060 {
1061 /* FIXME drow/2006-08-09: This call discards OPS, so the raw
1062 memory request will start back at current_target. */
1063 if (readbuf != NULL)
1064 res = dcache_xfer_memory (target_dcache, memaddr, readbuf,
1065 reg_len, 0);
1066 else
1067 /* FIXME drow/2006-08-09: If we're going to preserve const
1068 correctness dcache_xfer_memory should take readbuf and
1069 writebuf. */
1070 res = dcache_xfer_memory (target_dcache, memaddr,
1071 (void *) writebuf,
1072 reg_len, 1);
1073 if (res <= 0)
1074 return -1;
1075 else
1076 {
1077 if (readbuf && !show_memory_breakpoints)
1078 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1079 return res;
1080 }
1081 }
1082
1083 /* If none of those methods found the memory we wanted, fall back
1084 to a target partial transfer. Normally a single call to
1085 to_xfer_partial is enough; if it doesn't recognize an object
1086 it will call the to_xfer_partial of the next target down.
1087 But for memory this won't do. Memory is the only target
1088 object which can be read from more than one valid target.
1089 A core file, for instance, could have some of memory but
1090 delegate other bits to the target below it. So, we must
1091 manually try all targets. */
1092
1093 do
1094 {
1095 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1096 readbuf, writebuf, memaddr, reg_len);
1097 if (res > 0)
1098 break;
1099
1100 /* We want to continue past core files to executables, but not
1101 past a running target's memory. */
1102 if (ops->to_has_all_memory)
1103 break;
1104
1105 ops = ops->beneath;
1106 }
1107 while (ops != NULL);
1108
1109 if (readbuf && !show_memory_breakpoints)
1110 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1111
1112 /* If we still haven't got anything, return the last error. We
1113 give up. */
1114 return res;
1115 }
1116
1117 static void
1118 restore_show_memory_breakpoints (void *arg)
1119 {
1120 show_memory_breakpoints = (uintptr_t) arg;
1121 }
1122
1123 struct cleanup *
1124 make_show_memory_breakpoints_cleanup (int show)
1125 {
1126 int current = show_memory_breakpoints;
1127 show_memory_breakpoints = show;
1128
1129 return make_cleanup (restore_show_memory_breakpoints,
1130 (void *) (uintptr_t) current);
1131 }
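/* Editor's note: a sketch of how the cleanup above is meant to be
   used, so that a read temporarily sees the real memory contents
   under any software breakpoints; the surrounding function is
   hypothetical.  */
#if 0
static void
sketch_read_raw (CORE_ADDR addr, gdb_byte *buf, int len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

  target_read_memory (addr, buf, len);
  do_cleanups (old_chain);	/* Restore the previous setting.  */
}
#endif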
1132
1133 static LONGEST
1134 target_xfer_partial (struct target_ops *ops,
1135 enum target_object object, const char *annex,
1136 void *readbuf, const void *writebuf,
1137 ULONGEST offset, LONGEST len)
1138 {
1139 LONGEST retval;
1140
1141 gdb_assert (ops->to_xfer_partial != NULL);
1142
1143 /* If this is a memory transfer, let the memory-specific code
1144 have a look at it instead. Memory transfers are more
1145 complicated. */
1146 if (object == TARGET_OBJECT_MEMORY)
1147 retval = memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1148 else
1149 {
1150 enum target_object raw_object = object;
1151
1152 /* If this is a raw memory transfer, request the normal
1153 memory object from other layers. */
1154 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1155 raw_object = TARGET_OBJECT_MEMORY;
1156
1157 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1158 writebuf, offset, len);
1159 }
1160
1161 if (targetdebug)
1162 {
1163 const unsigned char *myaddr = NULL;
1164
1165 fprintf_unfiltered (gdb_stdlog,
1166 "%s:target_xfer_partial (%d, %s, 0x%lx, 0x%lx, %s, %s) = %s",
1167 ops->to_shortname,
1168 (int) object,
1169 (annex ? annex : "(null)"),
1170 (long) readbuf, (long) writebuf,
1171 core_addr_to_string_nz (offset),
1172 plongest (len), plongest (retval));
1173
1174 if (readbuf)
1175 myaddr = readbuf;
1176 if (writebuf)
1177 myaddr = writebuf;
1178 if (retval > 0 && myaddr != NULL)
1179 {
1180 int i;
1181
1182 fputs_unfiltered (", bytes =", gdb_stdlog);
1183 for (i = 0; i < retval; i++)
1184 {
1185 if ((((long) &(myaddr[i])) & 0xf) == 0)
1186 {
1187 if (targetdebug < 2 && i > 0)
1188 {
1189 fprintf_unfiltered (gdb_stdlog, " ...");
1190 break;
1191 }
1192 fprintf_unfiltered (gdb_stdlog, "\n");
1193 }
1194
1195 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1196 }
1197 }
1198
1199 fputc_unfiltered ('\n', gdb_stdlog);
1200 }
1201 return retval;
1202 }
1203
1204 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1205 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1206 if any error occurs.
1207
1208 If an error occurs, no guarantee is made about the contents of the data at
1209 MYADDR. In particular, the caller should not depend upon partial reads
1210 filling the buffer with good data. There is no way for the caller to know
1211 how much good data might have been transferred anyway. Callers that can
1212 deal with partial reads should call target_read (which will retry until
1213 it makes no progress, and then return how much was transferred). */
1214
1215 int
1216 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1217 {
1218 if (target_read (&current_target, TARGET_OBJECT_MEMORY, NULL,
1219 myaddr, memaddr, len) == len)
1220 return 0;
1221 else
1222 return EIO;
1223 }
1224
1225 int
1226 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1227 {
1228 if (target_write (&current_target, TARGET_OBJECT_MEMORY, NULL,
1229 myaddr, memaddr, len) == len)
1230 return 0;
1231 else
1232 return EIO;
1233 }
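/* Editor's note: a hypothetical use of the two wrappers above.  They
   return 0 on success or an errno value (EIO here), so callers check
   the result rather than a byte count.  */
#if 0
static int
sketch_copy_word (CORE_ADDR from, CORE_ADDR to)
{
  gdb_byte buf[4];

  if (target_read_memory (from, buf, sizeof buf) != 0)
    return EIO;
  return target_write_memory (to, buf, sizeof buf);
}
#endif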
1234
1235 /* Fetch the target's memory map. */
1236
1237 VEC(mem_region_s) *
1238 target_memory_map (void)
1239 {
1240 VEC(mem_region_s) *result;
1241 struct mem_region *last_one, *this_one;
1242 int ix;
1243 struct target_ops *t;
1244
1245 if (targetdebug)
1246 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1247
1248 for (t = current_target.beneath; t != NULL; t = t->beneath)
1249 if (t->to_memory_map != NULL)
1250 break;
1251
1252 if (t == NULL)
1253 return NULL;
1254
1255 result = t->to_memory_map (t);
1256 if (result == NULL)
1257 return NULL;
1258
1259 qsort (VEC_address (mem_region_s, result),
1260 VEC_length (mem_region_s, result),
1261 sizeof (struct mem_region), mem_region_cmp);
1262
1263 /* Check that regions do not overlap. Simultaneously assign
1264 a numbering for the "mem" commands to use to refer to
1265 each region. */
1266 last_one = NULL;
1267 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1268 {
1269 this_one->number = ix;
1270
1271 if (last_one && last_one->hi > this_one->lo)
1272 {
1273 warning (_("Overlapping regions in memory map: ignoring"));
1274 VEC_free (mem_region_s, result);
1275 return NULL;
1276 }
1277 last_one = this_one;
1278 }
1279
1280 return result;
1281 }
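/* Editor's note: an illustrative walk over the vector returned by
   target_memory_map, using the VEC accessors already used above; the
   caller and its output format are hypothetical.  */
#if 0
static void
sketch_show_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_unfiltered ("region %d: %s..%s\n", r->number,
		       paddr (r->lo), paddr (r->hi));
  VEC_free (mem_region_s, map);
}
#endif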
1282
1283 void
1284 target_flash_erase (ULONGEST address, LONGEST length)
1285 {
1286 struct target_ops *t;
1287
1288 for (t = current_target.beneath; t != NULL; t = t->beneath)
1289 if (t->to_flash_erase != NULL)
1290 {
1291 if (targetdebug)
1292 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1293 paddr (address), phex (length, 0));
1294 t->to_flash_erase (t, address, length);
1295 return;
1296 }
1297
1298 tcomplain ();
1299 }
1300
1301 void
1302 target_flash_done (void)
1303 {
1304 struct target_ops *t;
1305
1306 for (t = current_target.beneath; t != NULL; t = t->beneath)
1307 if (t->to_flash_done != NULL)
1308 {
1309 if (targetdebug)
1310 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1311 t->to_flash_done (t);
1312 return;
1313 }
1314
1315 tcomplain ();
1316 }
1317
1318 #ifndef target_stopped_data_address_p
1319 int
1320 target_stopped_data_address_p (struct target_ops *target)
1321 {
1322 if (target->to_stopped_data_address
1323 == (int (*) (struct target_ops *, CORE_ADDR *)) return_zero)
1324 return 0;
1325 if (target->to_stopped_data_address == debug_to_stopped_data_address
1326 && (debug_target.to_stopped_data_address
1327 == (int (*) (struct target_ops *, CORE_ADDR *)) return_zero))
1328 return 0;
1329 return 1;
1330 }
1331 #endif
1332
1333 static void
1334 show_trust_readonly (struct ui_file *file, int from_tty,
1335 struct cmd_list_element *c, const char *value)
1336 {
1337 fprintf_filtered (file, _("\
1338 Mode for reading from readonly sections is %s.\n"),
1339 value);
1340 }
1341
1342 /* More generic transfers. */
1343
1344 static LONGEST
1345 default_xfer_partial (struct target_ops *ops, enum target_object object,
1346 const char *annex, gdb_byte *readbuf,
1347 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1348 {
1349 if (object == TARGET_OBJECT_MEMORY
1350 && ops->deprecated_xfer_memory != NULL)
1351 /* If available, fall back to the target's
1352 "deprecated_xfer_memory" method. */
1353 {
1354 int xfered = -1;
1355 errno = 0;
1356 if (writebuf != NULL)
1357 {
1358 void *buffer = xmalloc (len);
1359 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1360 memcpy (buffer, writebuf, len);
1361 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1362 1/*write*/, NULL, ops);
1363 do_cleanups (cleanup);
1364 }
1365 if (readbuf != NULL)
1366 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1367 0/*read*/, NULL, ops);
1368 if (xfered > 0)
1369 return xfered;
1370 else if (xfered == 0 && errno == 0)
1371 /* "deprecated_xfer_memory" uses 0, cross checked against
1372 ERRNO as one indication of an error. */
1373 return 0;
1374 else
1375 return -1;
1376 }
1377 else if (ops->beneath != NULL)
1378 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1379 readbuf, writebuf, offset, len);
1380 else
1381 return -1;
1382 }
1383
1384 /* The xfer_partial handler for the topmost target. Unlike the default,
1385 it does not need to handle memory specially; it just passes all
1386 requests down the stack. */
1387
1388 static LONGEST
1389 current_xfer_partial (struct target_ops *ops, enum target_object object,
1390 const char *annex, gdb_byte *readbuf,
1391 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1392 {
1393 if (ops->beneath != NULL)
1394 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1395 readbuf, writebuf, offset, len);
1396 else
1397 return -1;
1398 }
1399
1400 /* Target vector read/write partial wrapper functions.
1401
1402 NOTE: cagney/2003-10-21: I wonder if having "to_xfer_partial
1403 (inbuf, outbuf)", instead of separate read/write methods, make life
1404 easier. */
1405
1406 static LONGEST
1407 target_read_partial (struct target_ops *ops,
1408 enum target_object object,
1409 const char *annex, gdb_byte *buf,
1410 ULONGEST offset, LONGEST len)
1411 {
1412 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1413 }
1414
1415 static LONGEST
1416 target_write_partial (struct target_ops *ops,
1417 enum target_object object,
1418 const char *annex, const gdb_byte *buf,
1419 ULONGEST offset, LONGEST len)
1420 {
1421 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1422 }
1423
1424 /* Wrappers to perform the full transfer. */
1425 LONGEST
1426 target_read (struct target_ops *ops,
1427 enum target_object object,
1428 const char *annex, gdb_byte *buf,
1429 ULONGEST offset, LONGEST len)
1430 {
1431 LONGEST xfered = 0;
1432 while (xfered < len)
1433 {
1434 LONGEST xfer = target_read_partial (ops, object, annex,
1435 (gdb_byte *) buf + xfered,
1436 offset + xfered, len - xfered);
1437 /* Call an observer, notifying them of the xfer progress? */
1438 if (xfer == 0)
1439 return xfered;
1440 if (xfer < 0)
1441 return -1;
1442 xfered += xfer;
1443 QUIT;
1444 }
1445 return len;
1446 }
1447
1448 LONGEST
1449 target_read_until_error (struct target_ops *ops,
1450 enum target_object object,
1451 const char *annex, gdb_byte *buf,
1452 ULONGEST offset, LONGEST len)
1453 {
1454 LONGEST xfered = 0;
1455 while (xfered < len)
1456 {
1457 LONGEST xfer = target_read_partial (ops, object, annex,
1458 (gdb_byte *) buf + xfered,
1459 offset + xfered, len - xfered);
1460 /* Call an observer, notifying them of the xfer progress? */
1461 if (xfer == 0)
1462 return xfered;
1463 if (xfer < 0)
1464 {
1465 /* We've got an error. Try to read in smaller blocks. */
1466 ULONGEST start = offset + xfered;
1467 ULONGEST remaining = len - xfered;
1468 ULONGEST half;
1469
1470 /* If an attempt was made to read a random memory address,
1471 it's likely that the very first byte is not accessible.
1472 Try reading the first byte, to avoid doing log N tries
1473 below. */
1474 xfer = target_read_partial (ops, object, annex,
1475 (gdb_byte *) buf + xfered, start, 1);
1476 if (xfer <= 0)
1477 return xfered;
1478 start += 1;
1479 remaining -= 1;
1480 half = remaining/2;
1481
1482 while (half > 0)
1483 {
1484 xfer = target_read_partial (ops, object, annex,
1485 (gdb_byte *) buf + xfered,
1486 start, half);
1487 if (xfer == 0)
1488 return xfered;
1489 if (xfer < 0)
1490 {
1491 remaining = half;
1492 }
1493 else
1494 {
1495 /* We have successfully read the first half. So, the
1496 error must be in the second half. Adjust start and
1497 remaining to point at the second half. */
1498 xfered += xfer;
1499 start += xfer;
1500 remaining -= xfer;
1501 }
1502 half = remaining/2;
1503 }
1504
1505 return xfered;
1506 }
1507 xfered += xfer;
1508 QUIT;
1509 }
1510 return len;
1511 }
1512
1513
1514 /* An alternative to target_write with progress callbacks. */
1515
1516 LONGEST
1517 target_write_with_progress (struct target_ops *ops,
1518 enum target_object object,
1519 const char *annex, const gdb_byte *buf,
1520 ULONGEST offset, LONGEST len,
1521 void (*progress) (ULONGEST, void *), void *baton)
1522 {
1523 LONGEST xfered = 0;
1524
1525 /* Give the progress callback a chance to set up. */
1526 if (progress)
1527 (*progress) (0, baton);
1528
1529 while (xfered < len)
1530 {
1531 LONGEST xfer = target_write_partial (ops, object, annex,
1532 (gdb_byte *) buf + xfered,
1533 offset + xfered, len - xfered);
1534
1535 if (xfer == 0)
1536 return xfered;
1537 if (xfer < 0)
1538 return -1;
1539
1540 if (progress)
1541 (*progress) (xfer, baton);
1542
1543 xfered += xfer;
1544 QUIT;
1545 }
1546 return len;
1547 }
1548
1549 LONGEST
1550 target_write (struct target_ops *ops,
1551 enum target_object object,
1552 const char *annex, const gdb_byte *buf,
1553 ULONGEST offset, LONGEST len)
1554 {
1555 return target_write_with_progress (ops, object, annex, buf, offset, len,
1556 NULL, NULL);
1557 }
1558
1559 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1560 the size of the transferred data. PADDING additional bytes are
1561 available in *BUF_P. This is a helper function for
1562 target_read_alloc; see the declaration of that function for more
1563 information. */
1564
1565 static LONGEST
1566 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1567 const char *annex, gdb_byte **buf_p, int padding)
1568 {
1569 size_t buf_alloc, buf_pos;
1570 gdb_byte *buf;
1571 LONGEST n;
1572
1573 /* This function does not have a length parameter; it reads the
1574 entire OBJECT. Also, it doesn't support objects fetched partly
1575 from one target and partly from another (in a different stratum,
1576 e.g. a core file and an executable). Both reasons make it
1577 unsuitable for reading memory. */
1578 gdb_assert (object != TARGET_OBJECT_MEMORY);
1579
1580 /* Start by reading up to 4K at a time. The target will throttle
1581 this number down if necessary. */
1582 buf_alloc = 4096;
1583 buf = xmalloc (buf_alloc);
1584 buf_pos = 0;
1585 while (1)
1586 {
1587 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1588 buf_pos, buf_alloc - buf_pos - padding);
1589 if (n < 0)
1590 {
1591 /* An error occurred. */
1592 xfree (buf);
1593 return -1;
1594 }
1595 else if (n == 0)
1596 {
1597 /* Read all there was. */
1598 if (buf_pos == 0)
1599 xfree (buf);
1600 else
1601 *buf_p = buf;
1602 return buf_pos;
1603 }
1604
1605 buf_pos += n;
1606
1607 /* If the buffer is filling up, expand it. */
1608 if (buf_alloc < buf_pos * 2)
1609 {
1610 buf_alloc *= 2;
1611 buf = xrealloc (buf, buf_alloc);
1612 }
1613
1614 QUIT;
1615 }
1616 }
1617
1618 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1619 the size of the transferred data. See the declaration in "target.h"
1620 function for more information about the return value. */
1621
1622 LONGEST
1623 target_read_alloc (struct target_ops *ops, enum target_object object,
1624 const char *annex, gdb_byte **buf_p)
1625 {
1626 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1627 }
1628
1629 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1630 returned as a string, allocated using xmalloc. If an error occurs
1631 or the transfer is unsupported, NULL is returned. Empty objects
1632 are returned as allocated but empty strings. A warning is issued
1633 if the result contains any embedded NUL bytes. */
1634
1635 char *
1636 target_read_stralloc (struct target_ops *ops, enum target_object object,
1637 const char *annex)
1638 {
1639 gdb_byte *buffer;
1640 LONGEST transferred;
1641
1642 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1643
1644 if (transferred < 0)
1645 return NULL;
1646
1647 if (transferred == 0)
1648 return xstrdup ("");
1649
1650 buffer[transferred] = 0;
1651 if (strlen (buffer) < transferred)
1652 warning (_("target object %d, annex %s, "
1653 "contained unexpected null characters"),
1654 (int) object, annex ? annex : "(none)");
1655
1656 return (char *) buffer;
1657 }
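/* Editor's note: a hypothetical caller of target_read_stralloc.  The
   object constant below (TARGET_OBJECT_AVAILABLE_FEATURES) and the
   "target.xml" annex are assumed to exist elsewhere in GDB; the point
   is the NULL-on-error / xfree ownership convention documented
   above.  */
#if 0
static void
sketch_dump_xml_object (void)
{
  char *text = target_read_stralloc (&current_target,
				     TARGET_OBJECT_AVAILABLE_FEATURES,
				     "target.xml");
  if (text != NULL)
    {
      puts_filtered (text);
      xfree (text);
    }
}
#endif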
1658
1659 /* Memory transfer methods. */
1660
1661 void
1662 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1663 LONGEST len)
1664 {
1665 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, buf, addr, len)
1666 != len)
1667 memory_error (EIO, addr);
1668 }
1669
1670 ULONGEST
1671 get_target_memory_unsigned (struct target_ops *ops,
1672 CORE_ADDR addr, int len)
1673 {
1674 gdb_byte buf[sizeof (ULONGEST)];
1675
1676 gdb_assert (len <= sizeof (buf));
1677 get_target_memory (ops, addr, buf, len);
1678 return extract_unsigned_integer (buf, len);
1679 }
1680
1681 static void
1682 target_info (char *args, int from_tty)
1683 {
1684 struct target_ops *t;
1685 int has_all_mem = 0;
1686
1687 if (symfile_objfile != NULL)
1688 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
1689
1690 for (t = target_stack; t != NULL; t = t->beneath)
1691 {
1692 if (!t->to_has_memory)
1693 continue;
1694
1695 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1696 continue;
1697 if (has_all_mem)
1698 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
1699 printf_unfiltered ("%s:\n", t->to_longname);
1700 (t->to_files_info) (t);
1701 has_all_mem = t->to_has_all_memory;
1702 }
1703 }
1704
1705 /* This function is called before any new inferior is created, e.g.
1706 by running a program, attaching, or connecting to a target.
1707 It cleans up any state from previous invocations which might
1708 change between runs. This is a subset of what target_preopen
1709 resets (things which might change between targets). */
1710
1711 void
1712 target_pre_inferior (int from_tty)
1713 {
1714 /* Clear out solib state. Otherwise the solib state of the previous
1715 inferior might have survived and is entirely wrong for the new
1716 target. This has been observed on GNU/Linux using glibc 2.3. How
1717 to reproduce:
1718
1719 bash$ ./foo&
1720 [1] 4711
1721 bash$ ./foo&
1722 [1] 4712
1723 bash$ gdb ./foo
1724 [...]
1725 (gdb) attach 4711
1726 (gdb) detach
1727 (gdb) attach 4712
1728 Cannot access memory at address 0xdeadbeef
1729 */
1730 no_shared_libraries (NULL, from_tty);
1731
1732 invalidate_target_mem_regions ();
1733
1734 target_clear_description ();
1735 }
1736
1737 /* This is to be called by the open routine before it does
1738 anything. */
1739
1740 void
1741 target_preopen (int from_tty)
1742 {
1743 dont_repeat ();
1744
1745 if (target_has_execution)
1746 {
1747 if (!from_tty
1748 || query (_("A program is being debugged already. Kill it? ")))
1749 target_kill ();
1750 else
1751 error (_("Program not killed."));
1752 }
1753
1754 /* Calling target_kill may remove the target from the stack. But if
1755 it doesn't (which seems like a win for UDI), remove it now. */
1756 /* Leave the exec target, though. The user may be switching from a
1757 live process to a core of the same program. */
1758 pop_all_targets_above (file_stratum, 0);
1759
1760 target_pre_inferior (from_tty);
1761 }
1762
1763 /* Detach a target after doing deferred register stores. */
1764
1765 void
1766 target_detach (char *args, int from_tty)
1767 {
1768 /* If we're in breakpoints-always-inserted mode, we have to
1769 remove them before detaching. */
1770 remove_breakpoints ();
1771
1772 (current_target.to_detach) (args, from_tty);
1773 }
1774
1775 void
1776 target_disconnect (char *args, int from_tty)
1777 {
1778 struct target_ops *t;
1779
1780 /* If we're in breakpoints-always-inserted mode, we have to
1781 remove them before disconnecting. */
1782 remove_breakpoints ();
1783
1784 for (t = current_target.beneath; t != NULL; t = t->beneath)
1785 if (t->to_disconnect != NULL)
1786 {
1787 if (targetdebug)
1788 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
1789 args, from_tty);
1790 t->to_disconnect (t, args, from_tty);
1791 return;
1792 }
1793
1794 tcomplain ();
1795 }
1796
1797 void
1798 target_resume (ptid_t ptid, int step, enum target_signal signal)
1799 {
1800 dcache_invalidate (target_dcache);
1801 (*current_target.to_resume) (ptid, step, signal);
1802 set_executing (ptid, 1);
1803 set_running (ptid, 1);
1804 }
1805 /* Look through the list of possible targets for a target that can
1806 follow forks. */
1807
1808 int
1809 target_follow_fork (int follow_child)
1810 {
1811 struct target_ops *t;
1812
1813 for (t = current_target.beneath; t != NULL; t = t->beneath)
1814 {
1815 if (t->to_follow_fork != NULL)
1816 {
1817 int retval = t->to_follow_fork (t, follow_child);
1818 if (targetdebug)
1819 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
1820 follow_child, retval);
1821 return retval;
1822 }
1823 }
1824
1825 /* Some target returned a fork event, but did not know how to follow it. */
1826 internal_error (__FILE__, __LINE__,
1827 "could not find a target to follow fork");
1828 }
1829
1830 /* Look for a target which can describe architectural features, starting
1831 from TARGET. If we find one, return its description. */
1832
1833 const struct target_desc *
1834 target_read_description (struct target_ops *target)
1835 {
1836 struct target_ops *t;
1837
1838 for (t = target; t != NULL; t = t->beneath)
1839 if (t->to_read_description != NULL)
1840 {
1841 const struct target_desc *tdesc;
1842
1843 tdesc = t->to_read_description (t);
1844 if (tdesc)
1845 return tdesc;
1846 }
1847
1848 return NULL;
1849 }
1850
1851 /* The default implementation of to_search_memory.
1852 This implements a basic search of memory, reading target memory and
1853 performing the search here (as opposed to performing the search on the
1854 target side with, for example, gdbserver). */
1855
1856 int
1857 simple_search_memory (struct target_ops *ops,
1858 CORE_ADDR start_addr, ULONGEST search_space_len,
1859 const gdb_byte *pattern, ULONGEST pattern_len,
1860 CORE_ADDR *found_addrp)
1861 {
1862 /* NOTE: also defined in find.c testcase. */
1863 #define SEARCH_CHUNK_SIZE 16000
1864 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
1865 /* Buffer to hold memory contents for searching. */
1866 gdb_byte *search_buf;
1867 unsigned search_buf_size;
1868 struct cleanup *old_cleanups;
1869
1870 search_buf_size = chunk_size + pattern_len - 1;
1871
1872 /* No point in trying to allocate a buffer larger than the search space. */
1873 if (search_space_len < search_buf_size)
1874 search_buf_size = search_space_len;
1875
1876 search_buf = malloc (search_buf_size);
1877 if (search_buf == NULL)
1878 error (_("Unable to allocate memory to perform the search."));
1879 old_cleanups = make_cleanup (free_current_contents, &search_buf);
1880
1881 /* Prime the search buffer. */
1882
1883 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1884 search_buf, start_addr, search_buf_size) != search_buf_size)
1885 {
1886 warning (_("Unable to access target memory at %s, halting search."),
1887 hex_string (start_addr));
1888 do_cleanups (old_cleanups);
1889 return -1;
1890 }
1891
1892 /* Perform the search.
1893
1894 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
1895 When we've scanned N bytes we copy the trailing bytes to the start and
1896 read in another N bytes. */
1897
1898 while (search_space_len >= pattern_len)
1899 {
1900 gdb_byte *found_ptr;
1901 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
1902
1903 found_ptr = memmem (search_buf, nr_search_bytes,
1904 pattern, pattern_len);
1905
1906 if (found_ptr != NULL)
1907 {
1908 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
1909 *found_addrp = found_addr;
1910 do_cleanups (old_cleanups);
1911 return 1;
1912 }
1913
1914 /* Not found in this chunk, skip to next chunk. */
1915
1916 /* Don't let search_space_len wrap here, it's unsigned. */
1917 if (search_space_len >= chunk_size)
1918 search_space_len -= chunk_size;
1919 else
1920 search_space_len = 0;
1921
1922 if (search_space_len >= pattern_len)
1923 {
1924 unsigned keep_len = search_buf_size - chunk_size;
1925 CORE_ADDR read_addr = start_addr + keep_len;
1926 int nr_to_read;
1927
1928 /* Copy the trailing part of the previous iteration to the front
1929 of the buffer for the next iteration. */
1930 gdb_assert (keep_len == pattern_len - 1);
1931 memcpy (search_buf, search_buf + chunk_size, keep_len);
1932
1933 nr_to_read = min (search_space_len - keep_len, chunk_size);
1934
1935 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1936 search_buf + keep_len, read_addr,
1937 nr_to_read) != nr_to_read)
1938 {
1939 warning (_("Unable to access target memory at %s, halting search."),
1940 hex_string (read_addr));
1941 do_cleanups (old_cleanups);
1942 return -1;
1943 }
1944
1945 start_addr += chunk_size;
1946 }
1947 }
1948
1949 /* Not found. */
1950
1951 do_cleanups (old_cleanups);
1952 return 0;
1953 }
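/* Editorial example (a hedged sketch, not part of gdb/target.c): the loop
   above reads the search space in SEARCH_CHUNK_SIZE pieces and keeps the
   trailing pattern_len - 1 bytes of each chunk, so a match straddling two
   chunks is still found.  A direct caller might look like this; the start
   address and length are made-up illustration values.  */
#if 0   /* Illustration only -- not compiled.  */
static void
example_simple_search (void)
{
  static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found;
  int rc;

  rc = simple_search_memory (&current_target,
                             0x400000 /* start, example value */,
                             0x10000 /* length, example value */,
                             pattern, sizeof (pattern), &found);
  if (rc == 1)
    printf_filtered ("found at %s\n", hex_string (found));
  else if (rc == 0)
    printf_filtered ("not found\n");
  else
    printf_filtered ("search halted by a memory read error\n");
}
#endif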
1954
1955 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
1956 sequence of bytes in PATTERN with length PATTERN_LEN.
1957
1958 The result is 1 if found, 0 if not found, and -1 if there was an error
1959 requiring halting of the search (e.g. memory read error).
1960 If the pattern is found the address is recorded in FOUND_ADDRP. */
1961
1962 int
1963 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
1964 const gdb_byte *pattern, ULONGEST pattern_len,
1965 CORE_ADDR *found_addrp)
1966 {
1967 struct target_ops *t;
1968 int found;
1969
1970 /* We don't use INHERIT to set current_target.to_search_memory,
1971 so we have to scan the target stack and handle targetdebug
1972 ourselves. */
1973
1974 if (targetdebug)
1975 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
1976 hex_string (start_addr));
1977
1978 for (t = current_target.beneath; t != NULL; t = t->beneath)
1979 if (t->to_search_memory != NULL)
1980 break;
1981
1982 if (t != NULL)
1983 {
1984 found = t->to_search_memory (t, start_addr, search_space_len,
1985 pattern, pattern_len, found_addrp);
1986 }
1987 else
1988 {
1989 /* If a special version of to_search_memory isn't available, use the
1990 simple version. */
1991 found = simple_search_memory (&current_target,
1992 start_addr, search_space_len,
1993 pattern, pattern_len, found_addrp);
1994 }
1995
1996 if (targetdebug)
1997 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
1998
1999 return found;
2000 }
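/* Editorial example (a hedged sketch, not part of gdb/target.c): a typical
   consumer of this interface is a "find"-style command that walks the
   search space, restarting just past each hit.  The addresses below are
   made-up illustration values.  */
#if 0   /* Illustration only -- not compiled.  */
static void
example_find_all_matches (const gdb_byte *pattern, ULONGEST pattern_len)
{
  CORE_ADDR start = 0x400000;        /* Example start address.  */
  ULONGEST remaining = 0x10000;      /* Example search-space length.  */
  CORE_ADDR found;

  while (remaining >= pattern_len
         && target_search_memory (start, remaining,
                                  pattern, pattern_len, &found) == 1)
    {
      printf_filtered ("match at %s\n", hex_string (found));

      /* Resume the search one byte past the previous hit.  */
      remaining -= (found + 1) - start;
      start = found + 1;
    }
}
#endif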
2001
2002 /* Look through the currently pushed targets. If none of them will
2003 be able to restart the currently running process, issue an error
2004 message. */
2005
2006 void
2007 target_require_runnable (void)
2008 {
2009 struct target_ops *t;
2010
2011 for (t = target_stack; t != NULL; t = t->beneath)
2012 {
2013 /* If this target knows how to create a new program, then
2014 assume we will still be able to after killing the current
2015 one. Either killing and mourning will not pop T, or else
2016 find_default_run_target will find it again. */
2017 if (t->to_create_inferior != NULL)
2018 return;
2019
2020 /* Do not worry about thread_stratum targets that can not
2021 create inferiors. Assume they will be pushed again if
2022 necessary, and continue to the process_stratum. */
2023 if (t->to_stratum == thread_stratum)
2024 continue;
2025
2026 error (_("\
2027 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2028 t->to_shortname);
2029 }
2030
2031 /* This function is only called if the target is running. In that
2032 case there should have been a process_stratum target and it
2033 should either know how to create inferiors, or not... */
2034 internal_error (__FILE__, __LINE__, "No targets found");
2035 }
2036
2037 /* Look through the list of possible targets for a target that can
2038 execute a run or attach command without any other data. This is
2039 used to locate the default process stratum.
2040
2041 If DO_MESG is not NULL, the result is always valid (error() is
2042 called for errors); else, return NULL on error. */
2043
2044 static struct target_ops *
2045 find_default_run_target (char *do_mesg)
2046 {
2047 struct target_ops **t;
2048 struct target_ops *runable = NULL;
2049 int count;
2050
2051 count = 0;
2052
2053 for (t = target_structs; t < target_structs + target_struct_size;
2054 ++t)
2055 {
2056 if ((*t)->to_can_run && target_can_run (*t))
2057 {
2058 runable = *t;
2059 ++count;
2060 }
2061 }
2062
2063 if (count != 1)
2064 {
2065 if (do_mesg)
2066 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2067 else
2068 return NULL;
2069 }
2070
2071 return runable;
2072 }
2073
2074 void
2075 find_default_attach (char *args, int from_tty)
2076 {
2077 struct target_ops *t;
2078
2079 t = find_default_run_target ("attach");
2080 (t->to_attach) (args, from_tty);
2081 return;
2082 }
2083
2084 void
2085 find_default_create_inferior (char *exec_file, char *allargs, char **env,
2086 int from_tty)
2087 {
2088 struct target_ops *t;
2089
2090 t = find_default_run_target ("run");
2091 (t->to_create_inferior) (exec_file, allargs, env, from_tty);
2092 return;
2093 }
2094
2095 int
2096 find_default_can_async_p (void)
2097 {
2098 struct target_ops *t;
2099
2100 /* This may be called before the target is pushed on the stack;
2101 look for the default process stratum. If there's none, gdb isn't
2102 configured with a native debugger, and target remote isn't
2103 connected yet. */
2104 t = find_default_run_target (NULL);
2105 if (t && t->to_can_async_p)
2106 return (t->to_can_async_p) ();
2107 return 0;
2108 }
2109
2110 int
2111 find_default_is_async_p (void)
2112 {
2113 struct target_ops *t;
2114
2115 /* This may be called before the target is pushed on the stack;
2116 look for the default process stratum. If there's none, gdb isn't
2117 configured with a native debugger, and target remote isn't
2118 connected yet. */
2119 t = find_default_run_target (NULL);
2120 if (t && t->to_is_async_p)
2121 return (t->to_is_async_p) ();
2122 return 0;
2123 }
2124
2125 int
2126 find_default_supports_non_stop (void)
2127 {
2128 struct target_ops *t;
2129
2130 t = find_default_run_target (NULL);
2131 if (t && t->to_supports_non_stop)
2132 return (t->to_supports_non_stop) ();
2133 return 0;
2134 }
2135
2136 int
2137 target_supports_non_stop (void)
2138 {
2139 struct target_ops *t;
2140 for (t = &current_target; t != NULL; t = t->beneath)
2141 if (t->to_supports_non_stop)
2142 return t->to_supports_non_stop ();
2143
2144 return 0;
2145 }
2146
2147
2148 static int
2149 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2150 {
2151 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2152 }
2153
2154 static int
2155 default_watchpoint_addr_within_range (struct target_ops *target,
2156 CORE_ADDR addr,
2157 CORE_ADDR start, int length)
2158 {
2159 return addr >= start && addr < start + length;
2160 }
2161
2162 static int
2163 return_zero (void)
2164 {
2165 return 0;
2166 }
2167
2168 static int
2169 return_one (void)
2170 {
2171 return 1;
2172 }
2173
2174 static int
2175 return_minus_one (void)
2176 {
2177 return -1;
2178 }
2179
2180 /*
2181 * Resize the to_sections table. Also make sure that anyone that
2182 * was holding on to an old value of it gets updated.
2183 * Returns the old size.
2184 */
2185
2186 int
2187 target_resize_to_sections (struct target_ops *target, int num_added)
2188 {
2189 struct target_ops **t;
2190 struct section_table *old_value;
2191 int old_count;
2192
2193 old_value = target->to_sections;
2194
2195 if (target->to_sections)
2196 {
2197 old_count = target->to_sections_end - target->to_sections;
2198 target->to_sections = (struct section_table *)
2199 xrealloc ((char *) target->to_sections,
2200 (sizeof (struct section_table)) * (num_added + old_count));
2201 }
2202 else
2203 {
2204 old_count = 0;
2205 target->to_sections = (struct section_table *)
2206 xmalloc ((sizeof (struct section_table)) * num_added);
2207 }
2208 target->to_sections_end = target->to_sections + (num_added + old_count);
2209
2210 /* Check to see if anyone else was pointing to this structure.
2211 If old_value was null, then no one was. */
2212
2213 if (old_value)
2214 {
2215 for (t = target_structs; t < target_structs + target_struct_size;
2216 ++t)
2217 {
2218 if ((*t)->to_sections == old_value)
2219 {
2220 (*t)->to_sections = target->to_sections;
2221 (*t)->to_sections_end = target->to_sections_end;
2222 }
2223 }
2224 /* There is a flattened view of the target stack in current_target,
2225 so its to_sections pointer might also need updating. */
2226 if (current_target.to_sections == old_value)
2227 {
2228 current_target.to_sections = target->to_sections;
2229 current_target.to_sections_end = target->to_sections_end;
2230 }
2231 }
2232
2233 return old_count;
2234
2235 }
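/* Editorial example (a hedged sketch, not part of gdb/target.c): xrealloc
   may move the table, which is why the code above re-points every
   target_ops that aliased the old to_sections value.  A caller that adds
   one section typically resizes first and then fills the new slot at the
   returned old count.  The helper name is made up, and the section_table
   field names are assumed from target.h of this vintage.  */
#if 0   /* Illustration only -- not compiled.  */
static void
example_append_section (struct target_ops *target,
                        bfd *abfd, asection *asect,
                        CORE_ADDR addr, CORE_ADDR endaddr)
{
  int old_count = target_resize_to_sections (target, 1);
  struct section_table *entry = target->to_sections + old_count;

  entry->bfd = abfd;
  entry->the_bfd_section = asect;
  entry->addr = addr;
  entry->endaddr = endaddr;
}
#endif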
2236
2237 /* Remove all target sections taken from ABFD.
2238
2239 Scan the current target stack for targets whose section tables
2240 refer to sections from BFD, and remove those sections. We use this
2241 when we notice that the inferior has unloaded a shared object, for
2242 example. */
2243 void
2244 remove_target_sections (bfd *abfd)
2245 {
2246 struct target_ops **t;
2247
2248 for (t = target_structs; t < target_structs + target_struct_size; t++)
2249 {
2250 struct section_table *src, *dest;
2251
2252 dest = (*t)->to_sections;
2253 for (src = (*t)->to_sections; src < (*t)->to_sections_end; src++)
2254 if (src->bfd != abfd)
2255 {
2256 /* Keep this section. */
2257 if (dest < src) *dest = *src;
2258 dest++;
2259 }
2260
2261 /* If we've dropped any sections, resize the section table. */
2262 if (dest < src)
2263 target_resize_to_sections (*t, dest - src);
2264 }
2265 }
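/* Editorial note (a hedged sketch, not part of gdb/target.c): the loop
   above is the classic two-pointer in-place filter -- SRC scans every
   entry, DEST trails it and only advances over entries being kept, and the
   final DEST - SRC (a non-positive value) is handed to
   target_resize_to_sections to shrink the table.  The same idiom on a
   plain integer array, for illustration:  */
#if 0   /* Illustration only -- not compiled.  */
static int
example_remove_value (int *array, int count, int unwanted)
{
  int *src, *dest = array;

  for (src = array; src < array + count; src++)
    if (*src != unwanted)
      {
        /* Keep this element, sliding it down only if a gap has opened.  */
        if (dest < src)
          *dest = *src;
        dest++;
      }

  return dest - array;   /* New number of elements.  */
}
#endif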
2266
2267
2268
2269
2270 /* Find a single runnable target among the registered targets and return it.
2271    If for some reason there is more than one, return NULL. */
2272
2273 struct target_ops *
2274 find_run_target (void)
2275 {
2276 struct target_ops **t;
2277 struct target_ops *runable = NULL;
2278 int count;
2279
2280 count = 0;
2281
2282 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2283 {
2284 if ((*t)->to_can_run && target_can_run (*t))
2285 {
2286 runable = *t;
2287 ++count;
2288 }
2289 }
2290
2291 return (count == 1 ? runable : NULL);
2292 }
2293
2294 /* Find a single core_stratum target in the list of targets and return it.
2295 If for some reason there is more than one, return NULL. */
2296
2297 struct target_ops *
2298 find_core_target (void)
2299 {
2300 struct target_ops **t;
2301 struct target_ops *runable = NULL;
2302 int count;
2303
2304 count = 0;
2305
2306 for (t = target_structs; t < target_structs + target_struct_size;
2307 ++t)
2308 {
2309 if ((*t)->to_stratum == core_stratum)
2310 {
2311 runable = *t;
2312 ++count;
2313 }
2314 }
2315
2316 return (count == 1 ? runable : NULL);
2317 }
2318
2319 /*
2320 * Find the next target down the stack from the specified target.
2321 */
2322
2323 struct target_ops *
2324 find_target_beneath (struct target_ops *t)
2325 {
2326 return t->beneath;
2327 }
2328
2329 \f
2330 /* The inferior process has died. Long live the inferior! */
2331
2332 void
2333 generic_mourn_inferior (void)
2334 {
2335 extern int show_breakpoint_hit_counts;
2336 ptid_t ptid;
2337
2338 ptid = inferior_ptid;
2339 inferior_ptid = null_ptid;
2340
2341 if (!ptid_equal (ptid, null_ptid))
2342 {
2343 int pid = ptid_get_pid (ptid);
2344 delete_inferior (pid);
2345 }
2346
2347 breakpoint_init_inferior (inf_exited);
2348 registers_changed ();
2349
2350 reopen_exec_file ();
2351 reinit_frame_cache ();
2352
2353 /* It is confusing to the user for ignore counts to stick around
2354 from previous runs of the inferior. So clear them. */
2355 /* However, it is more confusing for the ignore counts to disappear when
2356 using hit counts. So don't clear them if we're counting hits. */
2357 if (!show_breakpoint_hit_counts)
2358 breakpoint_clear_ignore_counts ();
2359
2360 if (deprecated_detach_hook)
2361 deprecated_detach_hook ();
2362 }
2363 \f
2364 /* Helper function for child_wait and the derivatives of child_wait.
2365 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2366 translation of that in OURSTATUS. */
2367 void
2368 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2369 {
2370 if (WIFEXITED (hoststatus))
2371 {
2372 ourstatus->kind = TARGET_WAITKIND_EXITED;
2373 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2374 }
2375 else if (!WIFSTOPPED (hoststatus))
2376 {
2377 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2378 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2379 }
2380 else
2381 {
2382 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2383 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2384 }
2385 }
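/* Editorial example (a hedged sketch, not part of gdb/target.c): a native
   to_wait implementation typically calls waitpid and then lets
   store_waitstatus do the host-to-GDB translation.  The function name is
   made up for illustration.  */
#if 0   /* Illustration only -- not compiled.  */
static ptid_t
example_native_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  int hoststatus;
  pid_t pid = waitpid (PIDGET (ptid), &hoststatus, 0);

  if (pid == -1)
    perror_with_name ("waitpid");

  store_waitstatus (ourstatus, hoststatus);
  return pid_to_ptid (pid);
}
#endif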
2386 \f
2387 /* Returns zero to leave the inferior alone, one to interrupt it. */
2388 int (*target_activity_function) (void);
2389 int target_activity_fd;
2390 \f
2391 /* Convert a normal process ID to a string. Returns the string in a
2392 static buffer. */
2393
2394 char *
2395 normal_pid_to_str (ptid_t ptid)
2396 {
2397 static char buf[32];
2398
2399 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2400 return buf;
2401 }
2402
2403 /* Error-catcher for target_find_memory_regions */
2404 static int dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2405 {
2406 error (_("No target."));
2407 return 0;
2408 }
2409
2410 /* Error-catcher for target_make_corefile_notes */
2411 static char * dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2412 {
2413 error (_("No target."));
2414 return NULL;
2415 }
2416
2417 /* Set up the handful of non-empty slots needed by the dummy target
2418 vector. */
2419
2420 static void
2421 init_dummy_target (void)
2422 {
2423 dummy_target.to_shortname = "None";
2424 dummy_target.to_longname = "None";
2425 dummy_target.to_doc = "";
2426 dummy_target.to_attach = find_default_attach;
2427 dummy_target.to_create_inferior = find_default_create_inferior;
2428 dummy_target.to_can_async_p = find_default_can_async_p;
2429 dummy_target.to_is_async_p = find_default_is_async_p;
2430 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2431 dummy_target.to_pid_to_str = normal_pid_to_str;
2432 dummy_target.to_stratum = dummy_stratum;
2433 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2434 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2435 dummy_target.to_xfer_partial = default_xfer_partial;
2436 dummy_target.to_magic = OPS_MAGIC;
2437 }
2438 \f
2439 static void
2440 debug_to_open (char *args, int from_tty)
2441 {
2442 debug_target.to_open (args, from_tty);
2443
2444 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2445 }
2446
2447 static void
2448 debug_to_close (int quitting)
2449 {
2450 target_close (&debug_target, quitting);
2451 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2452 }
2453
2454 void
2455 target_close (struct target_ops *targ, int quitting)
2456 {
2457 if (targ->to_xclose != NULL)
2458 targ->to_xclose (targ, quitting);
2459 else if (targ->to_close != NULL)
2460 targ->to_close (quitting);
2461 }
2462
2463 static void
2464 debug_to_attach (char *args, int from_tty)
2465 {
2466 debug_target.to_attach (args, from_tty);
2467
2468 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n", args, from_tty);
2469 }
2470
2471
2472 static void
2473 debug_to_post_attach (int pid)
2474 {
2475 debug_target.to_post_attach (pid);
2476
2477 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
2478 }
2479
2480 static void
2481 debug_to_detach (char *args, int from_tty)
2482 {
2483 debug_target.to_detach (args, from_tty);
2484
2485 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n", args, from_tty);
2486 }
2487
2488 static void
2489 debug_to_resume (ptid_t ptid, int step, enum target_signal siggnal)
2490 {
2491 debug_target.to_resume (ptid, step, siggnal);
2492
2493 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n", PIDGET (ptid),
2494 step ? "step" : "continue",
2495 target_signal_to_name (siggnal));
2496 }
2497
2498 static ptid_t
2499 debug_to_wait (ptid_t ptid, struct target_waitstatus *status)
2500 {
2501 ptid_t retval;
2502
2503 retval = debug_target.to_wait (ptid, status);
2504
2505 fprintf_unfiltered (gdb_stdlog,
2506 "target_wait (%d, status) = %d, ", PIDGET (ptid),
2507 PIDGET (retval));
2508 fprintf_unfiltered (gdb_stdlog, "status->kind = ");
2509 switch (status->kind)
2510 {
2511 case TARGET_WAITKIND_EXITED:
2512 fprintf_unfiltered (gdb_stdlog, "exited, status = %d\n",
2513 status->value.integer);
2514 break;
2515 case TARGET_WAITKIND_STOPPED:
2516 fprintf_unfiltered (gdb_stdlog, "stopped, signal = %s\n",
2517 target_signal_to_name (status->value.sig));
2518 break;
2519 case TARGET_WAITKIND_SIGNALLED:
2520 fprintf_unfiltered (gdb_stdlog, "signalled, signal = %s\n",
2521 target_signal_to_name (status->value.sig));
2522 break;
2523 case TARGET_WAITKIND_LOADED:
2524 fprintf_unfiltered (gdb_stdlog, "loaded\n");
2525 break;
2526 case TARGET_WAITKIND_FORKED:
2527 fprintf_unfiltered (gdb_stdlog, "forked\n");
2528 break;
2529 case TARGET_WAITKIND_VFORKED:
2530 fprintf_unfiltered (gdb_stdlog, "vforked\n");
2531 break;
2532 case TARGET_WAITKIND_EXECD:
2533 fprintf_unfiltered (gdb_stdlog, "execd\n");
2534 break;
2535 case TARGET_WAITKIND_SPURIOUS:
2536 fprintf_unfiltered (gdb_stdlog, "spurious\n");
2537 break;
2538 default:
2539 fprintf_unfiltered (gdb_stdlog, "unknown???\n");
2540 break;
2541 }
2542
2543 return retval;
2544 }
2545
2546 static void
2547 debug_print_register (const char * func,
2548 struct regcache *regcache, int regno)
2549 {
2550 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2551 fprintf_unfiltered (gdb_stdlog, "%s ", func);
2552 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
2553 && gdbarch_register_name (gdbarch, regno) != NULL
2554 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
2555 fprintf_unfiltered (gdb_stdlog, "(%s)",
2556 gdbarch_register_name (gdbarch, regno));
2557 else
2558 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
2559 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
2560 {
2561 int i, size = register_size (gdbarch, regno);
2562 unsigned char buf[MAX_REGISTER_SIZE];
2563 regcache_raw_collect (regcache, regno, buf);
2564 fprintf_unfiltered (gdb_stdlog, " = ");
2565 for (i = 0; i < size; i++)
2566 {
2567 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
2568 }
2569 if (size <= sizeof (LONGEST))
2570 {
2571 ULONGEST val = extract_unsigned_integer (buf, size);
2572 fprintf_unfiltered (gdb_stdlog, " %s %s",
2573 core_addr_to_string_nz (val), plongest (val));
2574 }
2575 }
2576 fprintf_unfiltered (gdb_stdlog, "\n");
2577 }
2578
2579 static void
2580 debug_to_fetch_registers (struct regcache *regcache, int regno)
2581 {
2582 debug_target.to_fetch_registers (regcache, regno);
2583 debug_print_register ("target_fetch_registers", regcache, regno);
2584 }
2585
2586 static void
2587 debug_to_store_registers (struct regcache *regcache, int regno)
2588 {
2589 debug_target.to_store_registers (regcache, regno);
2590 debug_print_register ("target_store_registers", regcache, regno);
2591 fprintf_unfiltered (gdb_stdlog, "\n");
2592 }
2593
2594 static void
2595 debug_to_prepare_to_store (struct regcache *regcache)
2596 {
2597 debug_target.to_prepare_to_store (regcache);
2598
2599 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
2600 }
2601
2602 static int
2603 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
2604 int write, struct mem_attrib *attrib,
2605 struct target_ops *target)
2606 {
2607 int retval;
2608
2609 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
2610 attrib, target);
2611
2612 fprintf_unfiltered (gdb_stdlog,
2613 "target_xfer_memory (0x%x, xxx, %d, %s, xxx) = %d",
2614 (unsigned int) memaddr, /* possibly truncates a long long */
2615 len, write ? "write" : "read", retval);
2616
2617 if (retval > 0)
2618 {
2619 int i;
2620
2621 fputs_unfiltered (", bytes =", gdb_stdlog);
2622 for (i = 0; i < retval; i++)
2623 {
2624 if ((((long) &(myaddr[i])) & 0xf) == 0)
2625 {
2626 if (targetdebug < 2 && i > 0)
2627 {
2628 fprintf_unfiltered (gdb_stdlog, " ...");
2629 break;
2630 }
2631 fprintf_unfiltered (gdb_stdlog, "\n");
2632 }
2633
2634 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
2635 }
2636 }
2637
2638 fputc_unfiltered ('\n', gdb_stdlog);
2639
2640 return retval;
2641 }
2642
2643 static void
2644 debug_to_files_info (struct target_ops *target)
2645 {
2646 debug_target.to_files_info (target);
2647
2648 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
2649 }
2650
2651 static int
2652 debug_to_insert_breakpoint (struct bp_target_info *bp_tgt)
2653 {
2654 int retval;
2655
2656 retval = debug_target.to_insert_breakpoint (bp_tgt);
2657
2658 fprintf_unfiltered (gdb_stdlog,
2659 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
2660 (unsigned long) bp_tgt->placed_address,
2661 (unsigned long) retval);
2662 return retval;
2663 }
2664
2665 static int
2666 debug_to_remove_breakpoint (struct bp_target_info *bp_tgt)
2667 {
2668 int retval;
2669
2670 retval = debug_target.to_remove_breakpoint (bp_tgt);
2671
2672 fprintf_unfiltered (gdb_stdlog,
2673 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
2674 (unsigned long) bp_tgt->placed_address,
2675 (unsigned long) retval);
2676 return retval;
2677 }
2678
2679 static int
2680 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
2681 {
2682 int retval;
2683
2684 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
2685
2686 fprintf_unfiltered (gdb_stdlog,
2687 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
2688 (unsigned long) type,
2689 (unsigned long) cnt,
2690 (unsigned long) from_tty,
2691 (unsigned long) retval);
2692 return retval;
2693 }
2694
2695 static int
2696 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2697 {
2698 int retval;
2699
2700 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
2701
2702 fprintf_unfiltered (gdb_stdlog,
2703 "TARGET_REGION_OK_FOR_HW_WATCHPOINT (%ld, %ld) = 0x%lx\n",
2704 (unsigned long) addr,
2705 (unsigned long) len,
2706 (unsigned long) retval);
2707 return retval;
2708 }
2709
2710 static int
2711 debug_to_stopped_by_watchpoint (void)
2712 {
2713 int retval;
2714
2715 retval = debug_target.to_stopped_by_watchpoint ();
2716
2717 fprintf_unfiltered (gdb_stdlog,
2718 "STOPPED_BY_WATCHPOINT () = %ld\n",
2719 (unsigned long) retval);
2720 return retval;
2721 }
2722
2723 static int
2724 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
2725 {
2726 int retval;
2727
2728 retval = debug_target.to_stopped_data_address (target, addr);
2729
2730 fprintf_unfiltered (gdb_stdlog,
2731 "target_stopped_data_address ([0x%lx]) = %ld\n",
2732 (unsigned long)*addr,
2733 (unsigned long)retval);
2734 return retval;
2735 }
2736
2737 static int
2738 debug_to_watchpoint_addr_within_range (struct target_ops *target,
2739 CORE_ADDR addr,
2740 CORE_ADDR start, int length)
2741 {
2742 int retval;
2743
2744 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
2745 start, length);
2746
2747 fprintf_unfiltered (gdb_stdlog,
2748 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
2749 (unsigned long) addr, (unsigned long) start, length,
2750 retval);
2751 return retval;
2752 }
2753
2754 static int
2755 debug_to_insert_hw_breakpoint (struct bp_target_info *bp_tgt)
2756 {
2757 int retval;
2758
2759 retval = debug_target.to_insert_hw_breakpoint (bp_tgt);
2760
2761 fprintf_unfiltered (gdb_stdlog,
2762 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
2763 (unsigned long) bp_tgt->placed_address,
2764 (unsigned long) retval);
2765 return retval;
2766 }
2767
2768 static int
2769 debug_to_remove_hw_breakpoint (struct bp_target_info *bp_tgt)
2770 {
2771 int retval;
2772
2773 retval = debug_target.to_remove_hw_breakpoint (bp_tgt);
2774
2775 fprintf_unfiltered (gdb_stdlog,
2776 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
2777 (unsigned long) bp_tgt->placed_address,
2778 (unsigned long) retval);
2779 return retval;
2780 }
2781
2782 static int
2783 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
2784 {
2785 int retval;
2786
2787 retval = debug_target.to_insert_watchpoint (addr, len, type);
2788
2789 fprintf_unfiltered (gdb_stdlog,
2790 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
2791 (unsigned long) addr, len, type, (unsigned long) retval);
2792 return retval;
2793 }
2794
2795 static int
2796 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
2797 {
2798 int retval;
2799
2800 retval = debug_target.to_remove_watchpoint (addr, len, type);
2801
2802 fprintf_unfiltered (gdb_stdlog,
2803 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
2804 (unsigned long) addr, len, type, (unsigned long) retval);
2805 return retval;
2806 }
2807
2808 static void
2809 debug_to_terminal_init (void)
2810 {
2811 debug_target.to_terminal_init ();
2812
2813 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
2814 }
2815
2816 static void
2817 debug_to_terminal_inferior (void)
2818 {
2819 debug_target.to_terminal_inferior ();
2820
2821 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
2822 }
2823
2824 static void
2825 debug_to_terminal_ours_for_output (void)
2826 {
2827 debug_target.to_terminal_ours_for_output ();
2828
2829 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
2830 }
2831
2832 static void
2833 debug_to_terminal_ours (void)
2834 {
2835 debug_target.to_terminal_ours ();
2836
2837 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
2838 }
2839
2840 static void
2841 debug_to_terminal_save_ours (void)
2842 {
2843 debug_target.to_terminal_save_ours ();
2844
2845 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
2846 }
2847
2848 static void
2849 debug_to_terminal_info (char *arg, int from_tty)
2850 {
2851 debug_target.to_terminal_info (arg, from_tty);
2852
2853 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
2854 from_tty);
2855 }
2856
2857 static void
2858 debug_to_kill (void)
2859 {
2860 debug_target.to_kill ();
2861
2862 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
2863 }
2864
2865 static void
2866 debug_to_load (char *args, int from_tty)
2867 {
2868 debug_target.to_load (args, from_tty);
2869
2870 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
2871 }
2872
2873 static int
2874 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
2875 {
2876 int retval;
2877
2878 retval = debug_target.to_lookup_symbol (name, addrp);
2879
2880 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
2881
2882 return retval;
2883 }
2884
2885 static void
2886 debug_to_create_inferior (char *exec_file, char *args, char **env,
2887 int from_tty)
2888 {
2889 debug_target.to_create_inferior (exec_file, args, env, from_tty);
2890
2891 fprintf_unfiltered (gdb_stdlog, "target_create_inferior (%s, %s, xxx, %d)\n",
2892 exec_file, args, from_tty);
2893 }
2894
2895 static void
2896 debug_to_post_startup_inferior (ptid_t ptid)
2897 {
2898 debug_target.to_post_startup_inferior (ptid);
2899
2900 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
2901 PIDGET (ptid));
2902 }
2903
2904 static void
2905 debug_to_acknowledge_created_inferior (int pid)
2906 {
2907 debug_target.to_acknowledge_created_inferior (pid);
2908
2909 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
2910 pid);
2911 }
2912
2913 static void
2914 debug_to_insert_fork_catchpoint (int pid)
2915 {
2916 debug_target.to_insert_fork_catchpoint (pid);
2917
2918 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
2919 pid);
2920 }
2921
2922 static int
2923 debug_to_remove_fork_catchpoint (int pid)
2924 {
2925 int retval;
2926
2927 retval = debug_target.to_remove_fork_catchpoint (pid);
2928
2929 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
2930 pid, retval);
2931
2932 return retval;
2933 }
2934
2935 static void
2936 debug_to_insert_vfork_catchpoint (int pid)
2937 {
2938 debug_target.to_insert_vfork_catchpoint (pid);
2939
2940 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
2941 pid);
2942 }
2943
2944 static int
2945 debug_to_remove_vfork_catchpoint (int pid)
2946 {
2947 int retval;
2948
2949 retval = debug_target.to_remove_vfork_catchpoint (pid);
2950
2951 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
2952 pid, retval);
2953
2954 return retval;
2955 }
2956
2957 static void
2958 debug_to_insert_exec_catchpoint (int pid)
2959 {
2960 debug_target.to_insert_exec_catchpoint (pid);
2961
2962 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
2963 pid);
2964 }
2965
2966 static int
2967 debug_to_remove_exec_catchpoint (int pid)
2968 {
2969 int retval;
2970
2971 retval = debug_target.to_remove_exec_catchpoint (pid);
2972
2973 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
2974 pid, retval);
2975
2976 return retval;
2977 }
2978
2979 static int
2980 debug_to_has_exited (int pid, int wait_status, int *exit_status)
2981 {
2982 int has_exited;
2983
2984 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
2985
2986 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
2987 pid, wait_status, *exit_status, has_exited);
2988
2989 return has_exited;
2990 }
2991
2992 static void
2993 debug_to_mourn_inferior (void)
2994 {
2995 debug_target.to_mourn_inferior ();
2996
2997 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2998 }
2999
3000 static int
3001 debug_to_can_run (void)
3002 {
3003 int retval;
3004
3005 retval = debug_target.to_can_run ();
3006
3007 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3008
3009 return retval;
3010 }
3011
3012 static void
3013 debug_to_notice_signals (ptid_t ptid)
3014 {
3015 debug_target.to_notice_signals (ptid);
3016
3017 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3018 PIDGET (ptid));
3019 }
3020
3021 static int
3022 debug_to_thread_alive (ptid_t ptid)
3023 {
3024 int retval;
3025
3026 retval = debug_target.to_thread_alive (ptid);
3027
3028 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3029 PIDGET (ptid), retval);
3030
3031 return retval;
3032 }
3033
3034 static void
3035 debug_to_find_new_threads (void)
3036 {
3037 debug_target.to_find_new_threads ();
3038
3039 fputs_unfiltered ("target_find_new_threads ()\n", gdb_stdlog);
3040 }
3041
3042 static void
3043 debug_to_stop (ptid_t ptid)
3044 {
3045 debug_target.to_stop (ptid);
3046
3047 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3048 target_pid_to_str (ptid));
3049 }
3050
3051 static void
3052 debug_to_rcmd (char *command,
3053 struct ui_file *outbuf)
3054 {
3055 debug_target.to_rcmd (command, outbuf);
3056 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3057 }
3058
3059 static char *
3060 debug_to_pid_to_exec_file (int pid)
3061 {
3062 char *exec_file;
3063
3064 exec_file = debug_target.to_pid_to_exec_file (pid);
3065
3066 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3067 pid, exec_file);
3068
3069 return exec_file;
3070 }
3071
3072 static void
3073 setup_target_debug (void)
3074 {
3075 memcpy (&debug_target, &current_target, sizeof debug_target);
3076
3077 current_target.to_open = debug_to_open;
3078 current_target.to_close = debug_to_close;
3079 current_target.to_attach = debug_to_attach;
3080 current_target.to_post_attach = debug_to_post_attach;
3081 current_target.to_detach = debug_to_detach;
3082 current_target.to_resume = debug_to_resume;
3083 current_target.to_wait = debug_to_wait;
3084 current_target.to_fetch_registers = debug_to_fetch_registers;
3085 current_target.to_store_registers = debug_to_store_registers;
3086 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3087 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3088 current_target.to_files_info = debug_to_files_info;
3089 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3090 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3091 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3092 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3093 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3094 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3095 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3096 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3097 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3098 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3099 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3100 current_target.to_terminal_init = debug_to_terminal_init;
3101 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3102 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3103 current_target.to_terminal_ours = debug_to_terminal_ours;
3104 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3105 current_target.to_terminal_info = debug_to_terminal_info;
3106 current_target.to_kill = debug_to_kill;
3107 current_target.to_load = debug_to_load;
3108 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3109 current_target.to_create_inferior = debug_to_create_inferior;
3110 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3111 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3112 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3113 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3114 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3115 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3116 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3117 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3118 current_target.to_has_exited = debug_to_has_exited;
3119 current_target.to_mourn_inferior = debug_to_mourn_inferior;
3120 current_target.to_can_run = debug_to_can_run;
3121 current_target.to_notice_signals = debug_to_notice_signals;
3122 current_target.to_thread_alive = debug_to_thread_alive;
3123 current_target.to_find_new_threads = debug_to_find_new_threads;
3124 current_target.to_stop = debug_to_stop;
3125 current_target.to_rcmd = debug_to_rcmd;
3126 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3127 }
3128 \f
3129
3130 static char targ_desc[] =
3131 "Names of targets and files being debugged.\n\
3132 Shows the entire stack of targets currently in use (including the exec-file,\n\
3133 core-file, and process, if any), as well as the symbol file name.";
3134
3135 static void
3136 do_monitor_command (char *cmd,
3137 int from_tty)
3138 {
3139 if ((current_target.to_rcmd
3140 == (void (*) (char *, struct ui_file *)) tcomplain)
3141 || (current_target.to_rcmd == debug_to_rcmd
3142 && (debug_target.to_rcmd
3143 == (void (*) (char *, struct ui_file *)) tcomplain)))
3144 error (_("\"monitor\" command not supported by this target."));
3145 target_rcmd (cmd, gdb_stdtarg);
3146 }
3147
3148 /* Print the name of each layer of our target stack. */
3149
3150 static void
3151 maintenance_print_target_stack (char *cmd, int from_tty)
3152 {
3153 struct target_ops *t;
3154
3155 printf_filtered (_("The current target stack is:\n"));
3156
3157 for (t = target_stack; t != NULL; t = t->beneath)
3158 {
3159 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3160 }
3161 }
3162
3163 /* Controls whether async mode is permitted. */
3164 int target_async_permitted = 0;
3165
3166 /* The set command writes to this variable. If the inferior is
3167 executing, target_async_permitted is *not* updated. */
3168 static int target_async_permitted_1 = 0;
3169
3170 static void
3171 set_maintenance_target_async_permitted (char *args, int from_tty,
3172 struct cmd_list_element *c)
3173 {
3174 if (target_has_execution)
3175 {
3176 target_async_permitted_1 = target_async_permitted;
3177 error (_("Cannot change this setting while the inferior is running."));
3178 }
3179
3180 target_async_permitted = target_async_permitted_1;
3181 }
3182
3183 static void
3184 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3185 struct cmd_list_element *c,
3186 const char *value)
3187 {
3188 fprintf_filtered (file, _("\
3189 Controlling the inferior in asynchronous mode is %s.\n"), value);
3190 }
3191
3192 void
3193 initialize_targets (void)
3194 {
3195 init_dummy_target ();
3196 push_target (&dummy_target);
3197
3198 add_info ("target", target_info, targ_desc);
3199 add_info ("files", target_info, targ_desc);
3200
3201 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3202 Set target debugging."), _("\
3203 Show target debugging."), _("\
3204 When non-zero, target debugging is enabled. Higher numbers are more\n\
3205 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3206 command."),
3207 NULL,
3208 show_targetdebug,
3209 &setdebuglist, &showdebuglist);
3210
3211 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3212 &trust_readonly, _("\
3213 Set mode for reading from readonly sections."), _("\
3214 Show mode for reading from readonly sections."), _("\
3215 When this mode is on, memory reads from readonly sections (such as .text)\n\
3216 will be read from the object file instead of from the target. This will\n\
3217 result in significant performance improvement for remote targets."),
3218 NULL,
3219 show_trust_readonly,
3220 &setlist, &showlist);
3221
3222 add_com ("monitor", class_obscure, do_monitor_command,
3223 _("Send a command to the remote monitor (remote targets only)."));
3224
3225 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3226 _("Print the name of each layer of the internal target stack."),
3227 &maintenanceprintlist);
3228
3229 add_setshow_boolean_cmd ("target-async", no_class,
3230 &target_async_permitted_1, _("\
3231 Set whether gdb controls the inferior in asynchronous mode."), _("\
3232 Show whether gdb controls the inferior in asynchronous mode."), _("\
3233 Tells gdb whether to control the inferior in asynchronous mode."),
3234 set_maintenance_target_async_permitted,
3235 show_maintenance_target_async_permitted,
3236 &setlist,
3237 &showlist);
3238
3239 target_dcache = dcache_init ();
3240 }