binutils-gdb.git / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46
47 static void target_info (char *, int);
48
49 static void kill_or_be_killed (int);
50
51 static void default_terminal_info (char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
57
58 static int nosymbol (char *, CORE_ADDR *);
59
60 static void tcomplain (void) ATTR_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static void nosupport_runtime (void);
77
78 static LONGEST default_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST current_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex, gdb_byte *readbuf,
87 const gdb_byte *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static LONGEST target_xfer_partial (struct target_ops *ops,
91 enum target_object object,
92 const char *annex,
93 void *readbuf, const void *writebuf,
94 ULONGEST offset, LONGEST len);
95
96 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
97 ptid_t ptid);
98
99 static void init_dummy_target (void);
100
101 static struct target_ops debug_target;
102
103 static void debug_to_open (char *, int);
104
105 static void debug_to_prepare_to_store (struct regcache *);
106
107 static void debug_to_files_info (struct target_ops *);
108
109 static int debug_to_insert_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_remove_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_can_use_hw_breakpoint (int, int, int);
116
117 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
121 struct bp_target_info *);
122
123 static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
124
125 static int debug_to_remove_watchpoint (CORE_ADDR, int, int);
126
127 static int debug_to_stopped_by_watchpoint (void);
128
129 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
130
131 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
132 CORE_ADDR, CORE_ADDR, int);
133
134 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
151
152 static int debug_to_can_run (void);
153
154 static void debug_to_notice_signals (ptid_t);
155
156 static void debug_to_stop (ptid_t);
157
158 /* NOTE: cagney/2004-09-29: Many targets reference this variable in
159 weird and mysterious ways. Putting the variable here lets those
160 weird and mysterious ways keep building while they are being
161 converted to the inferior inheritance structure. */
162 struct target_ops deprecated_child_ops;
163
164 /* Pointer to array of target architecture structures; the size of the
165 array; the current index into the array; the allocated size of the
166 array. */
167 struct target_ops **target_structs;
168 unsigned target_struct_size;
169 unsigned target_struct_index;
170 unsigned target_struct_allocsize;
171 #define DEFAULT_ALLOCSIZE 10
172
173 /* The initial current target, so that there is always a semi-valid
174 current target. */
175
176 static struct target_ops dummy_target;
177
178 /* Top of target stack. */
179
180 static struct target_ops *target_stack;
181
182 /* The target structure we are currently using to talk to a process
183 or file or whatever "inferior" we have. */
184
185 struct target_ops current_target;
186
187 /* Command list for target. */
188
189 static struct cmd_list_element *targetlist = NULL;
190
191 /* Nonzero if we should trust readonly sections from the
192 executable when reading memory. */
193
194 static int trust_readonly = 0;
195
196 /* Nonzero if we should show true memory content including
197 memory breakpoint inserted by gdb. */
198
199 static int show_memory_breakpoints = 0;
200
201 /* Non-zero if we want to see trace of target level stuff. */
202
203 static int targetdebug = 0;
204 static void
205 show_targetdebug (struct ui_file *file, int from_tty,
206 struct cmd_list_element *c, const char *value)
207 {
208 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
209 }
210
211 static void setup_target_debug (void);
212
213 /* The option sets this. */
214 static int stack_cache_enabled_p_1 = 1;
215 /* And set_stack_cache_enabled_p updates this.
216 The reason for the separation is so that we don't flush the cache for
217 on->on transitions. */
218 static int stack_cache_enabled_p = 1;
219
220 /* This is called *after* the stack-cache has been set.
221 Flush the cache for off->on and on->off transitions.
222 There's no real need to flush the cache for on->off transitions,
223 except cleanliness. */
224
225 static void
226 set_stack_cache_enabled_p (char *args, int from_tty,
227 struct cmd_list_element *c)
228 {
229 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
230 target_dcache_invalidate ();
231
232 stack_cache_enabled_p = stack_cache_enabled_p_1;
233 }
234
235 static void
236 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
237 struct cmd_list_element *c, const char *value)
238 {
239 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
240 }
241
242 /* Cache of memory operations, to speed up remote access. */
243 static DCACHE *target_dcache;
244
245 /* Invalidate the target dcache. */
246
247 void
248 target_dcache_invalidate (void)
249 {
250 dcache_invalidate (target_dcache);
251 }
252
253 /* The user just typed 'target' without the name of a target. */
254
255 static void
256 target_command (char *arg, int from_tty)
257 {
258 fputs_filtered ("Argument required (target name). Try `help target'\n",
259 gdb_stdout);
260 }
261
262 /* Default target_has_* methods for process_stratum targets. */
263
264 int
265 default_child_has_all_memory (struct target_ops *ops)
266 {
267 /* If no inferior selected, then we can't read memory here. */
268 if (ptid_equal (inferior_ptid, null_ptid))
269 return 0;
270
271 return 1;
272 }
273
274 int
275 default_child_has_memory (struct target_ops *ops)
276 {
277 /* If no inferior selected, then we can't read memory here. */
278 if (ptid_equal (inferior_ptid, null_ptid))
279 return 0;
280
281 return 1;
282 }
283
284 int
285 default_child_has_stack (struct target_ops *ops)
286 {
287 /* If no inferior selected, there's no stack. */
288 if (ptid_equal (inferior_ptid, null_ptid))
289 return 0;
290
291 return 1;
292 }
293
294 int
295 default_child_has_registers (struct target_ops *ops)
296 {
297 /* Can't read registers from no inferior. */
298 if (ptid_equal (inferior_ptid, null_ptid))
299 return 0;
300
301 return 1;
302 }
303
304 int
305 default_child_has_execution (struct target_ops *ops)
306 {
307 /* If there's no thread selected, then we can't make it run through
308 hoops. */
309 if (ptid_equal (inferior_ptid, null_ptid))
310 return 0;
311
312 return 1;
313 }
314
315
316 int
317 target_has_all_memory_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_all_memory (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_memory_1 (void)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_memory (t))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_stack_1 (void)
342 {
343 struct target_ops *t;
344
345 for (t = current_target.beneath; t != NULL; t = t->beneath)
346 if (t->to_has_stack (t))
347 return 1;
348
349 return 0;
350 }
351
352 int
353 target_has_registers_1 (void)
354 {
355 struct target_ops *t;
356
357 for (t = current_target.beneath; t != NULL; t = t->beneath)
358 if (t->to_has_registers (t))
359 return 1;
360
361 return 0;
362 }
363
364 int
365 target_has_execution_1 (void)
366 {
367 struct target_ops *t;
368
369 for (t = current_target.beneath; t != NULL; t = t->beneath)
370 if (t->to_has_execution (t))
371 return 1;
372
373 return 0;
374 }
375
376 /* Add a possible target architecture to the list. */
377
378 void
379 add_target (struct target_ops *t)
380 {
381 /* Provide default values for all "must have" methods. */
382 if (t->to_xfer_partial == NULL)
383 t->to_xfer_partial = default_xfer_partial;
384
385 if (t->to_has_all_memory == NULL)
386 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
387
388 if (t->to_has_memory == NULL)
389 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
390
391 if (t->to_has_stack == NULL)
392 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
393
394 if (t->to_has_registers == NULL)
395 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
396
397 if (t->to_has_execution == NULL)
398 t->to_has_execution = (int (*) (struct target_ops *)) return_zero;
399
400 if (!target_structs)
401 {
402 target_struct_allocsize = DEFAULT_ALLOCSIZE;
403 target_structs = (struct target_ops **) xmalloc
404 (target_struct_allocsize * sizeof (*target_structs));
405 }
406 if (target_struct_size >= target_struct_allocsize)
407 {
408 target_struct_allocsize *= 2;
409 target_structs = (struct target_ops **)
410 xrealloc ((char *) target_structs,
411 target_struct_allocsize * sizeof (*target_structs));
412 }
413 target_structs[target_struct_size++] = t;
414
415 if (targetlist == NULL)
416 add_prefix_cmd ("target", class_run, target_command, _("\
417 Connect to a target machine or process.\n\
418 The first argument is the type or protocol of the target machine.\n\
419 Remaining arguments are interpreted by the target protocol. For more\n\
420 information on the arguments for a particular protocol, type\n\
421 `help target ' followed by the protocol name."),
422 &targetlist, "target ", 0, &cmdlist);
423 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
424 }
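
/* Illustrative sketch, not part of the GDB sources: how a target backend
   typically registers itself through add_target.  The "foo" target, its
   open routine and _initialize_foo are hypothetical; the struct target_ops
   field names are the ones used elsewhere in this file.  */

static struct target_ops foo_target_ops;

static void
foo_open (char *args, int from_tty)
{
  /* Parse ARGS, connect to the foo system, then
     push_target (&foo_target_ops).  */
}

void
_initialize_foo (void)
{
  foo_target_ops.to_shortname = "foo";
  foo_target_ops.to_longname = "Hypothetical foo target";
  foo_target_ops.to_doc = "Connect to a hypothetical foo target.";
  foo_target_ops.to_open = foo_open;
  foo_target_ops.to_stratum = process_stratum;
  foo_target_ops.to_magic = OPS_MAGIC;
  add_target (&foo_target_ops);   /* Unset methods get safe defaults.  */
}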
425
426 /* Stub functions */
427
428 void
429 target_ignore (void)
430 {
431 }
432
433 void
434 target_kill (void)
435 {
436 struct target_ops *t;
437
438 for (t = current_target.beneath; t != NULL; t = t->beneath)
439 if (t->to_kill != NULL)
440 {
441 if (targetdebug)
442 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
443
444 t->to_kill (t);
445 return;
446 }
447
448 noprocess ();
449 }
450
451 void
452 target_load (char *arg, int from_tty)
453 {
454 target_dcache_invalidate ();
455 (*current_target.to_load) (arg, from_tty);
456 }
457
458 void
459 target_create_inferior (char *exec_file, char *args,
460 char **env, int from_tty)
461 {
462 struct target_ops *t;
463 for (t = current_target.beneath; t != NULL; t = t->beneath)
464 {
465 if (t->to_create_inferior != NULL)
466 {
467 t->to_create_inferior (t, exec_file, args, env, from_tty);
468 if (targetdebug)
469 fprintf_unfiltered (gdb_stdlog,
470 "target_create_inferior (%s, %s, xxx, %d)\n",
471 exec_file, args, from_tty);
472 return;
473 }
474 }
475
476 internal_error (__FILE__, __LINE__,
477 "could not find a target to create inferior");
478 }
479
480 void
481 target_terminal_inferior (void)
482 {
483 /* A background resume (``run&'') should leave GDB in control of the
484 terminal. Use target_can_async_p, not target_is_async_p, since at
485 this point the target is not async yet. However, if sync_execution
486 is not set, we know it will become async prior to resume. */
487 if (target_can_async_p () && !sync_execution)
488 return;
489
490 /* If GDB is resuming the inferior in the foreground, install
491 inferior's terminal modes. */
492 (*current_target.to_terminal_inferior) ();
493 }
494
495 static int
496 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
497 struct target_ops *t)
498 {
499 errno = EIO; /* Can't read/write this location */
500 return 0; /* No bytes handled */
501 }
502
503 static void
504 tcomplain (void)
505 {
506 error (_("You can't do that when your target is `%s'"),
507 current_target.to_shortname);
508 }
509
510 void
511 noprocess (void)
512 {
513 error (_("You can't do that without a process to debug."));
514 }
515
516 static int
517 nosymbol (char *name, CORE_ADDR *addrp)
518 {
519 return 1; /* Symbol does not exist in target env */
520 }
521
522 static void
523 nosupport_runtime (void)
524 {
525 if (ptid_equal (inferior_ptid, null_ptid))
526 noprocess ();
527 else
528 error (_("No run-time support for this"));
529 }
530
531
532 static void
533 default_terminal_info (char *args, int from_tty)
534 {
535 printf_unfiltered (_("No saved terminal information.\n"));
536 }
537
538 /* This is the default target_create_inferior and target_attach function.
539 If the current target is executing, it asks whether to kill it off.
540 If this function returns without calling error(), it has killed off
541 the target, and the operation should be attempted. */
542
543 static void
544 kill_or_be_killed (int from_tty)
545 {
546 if (target_has_execution)
547 {
548 printf_unfiltered (_("You are already running a program:\n"));
549 target_files_info ();
550 if (query (_("Kill it? ")))
551 {
552 target_kill ();
553 if (target_has_execution)
554 error (_("Killing the program did not help."));
555 return;
556 }
557 else
558 {
559 error (_("Program not killed."));
560 }
561 }
562 tcomplain ();
563 }
564
565 /* A default implementation for the to_get_ada_task_ptid target method.
566
567 This function builds the PTID by using both LWP and TID as part of
568 the PTID lwp and tid elements. The pid used is the pid of the
569 inferior_ptid. */
570
571 static ptid_t
572 default_get_ada_task_ptid (long lwp, long tid)
573 {
574 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
575 }
576
577 /* Go through the target stack from top to bottom, copying over zero
578 entries in current_target, then filling in still empty entries. In
579 effect, we are doing class inheritance through the pushed target
580 vectors.
581
582 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
583 is currently implemented, is that it discards any knowledge of
584 which target an inherited method originally belonged to.
585 Consequently, new target methods should instead explicitly and
586 locally search the target stack for the target that can handle the
587 request. */
588
589 static void
590 update_current_target (void)
591 {
592 struct target_ops *t;
593
594 /* First, reset current's contents. */
595 memset (&current_target, 0, sizeof (current_target));
596
597 #define INHERIT(FIELD, TARGET) \
598 if (!current_target.FIELD) \
599 current_target.FIELD = (TARGET)->FIELD
600
601 for (t = target_stack; t; t = t->beneath)
602 {
603 INHERIT (to_shortname, t);
604 INHERIT (to_longname, t);
605 INHERIT (to_doc, t);
606 /* Do not inherit to_open. */
607 /* Do not inherit to_close. */
608 /* Do not inherit to_attach. */
609 INHERIT (to_post_attach, t);
610 INHERIT (to_attach_no_wait, t);
611 /* Do not inherit to_detach. */
612 /* Do not inherit to_disconnect. */
613 /* Do not inherit to_resume. */
614 /* Do not inherit to_wait. */
615 /* Do not inherit to_fetch_registers. */
616 /* Do not inherit to_store_registers. */
617 INHERIT (to_prepare_to_store, t);
618 INHERIT (deprecated_xfer_memory, t);
619 INHERIT (to_files_info, t);
620 INHERIT (to_insert_breakpoint, t);
621 INHERIT (to_remove_breakpoint, t);
622 INHERIT (to_can_use_hw_breakpoint, t);
623 INHERIT (to_insert_hw_breakpoint, t);
624 INHERIT (to_remove_hw_breakpoint, t);
625 INHERIT (to_insert_watchpoint, t);
626 INHERIT (to_remove_watchpoint, t);
627 INHERIT (to_stopped_data_address, t);
628 INHERIT (to_have_steppable_watchpoint, t);
629 INHERIT (to_have_continuable_watchpoint, t);
630 INHERIT (to_stopped_by_watchpoint, t);
631 INHERIT (to_watchpoint_addr_within_range, t);
632 INHERIT (to_region_ok_for_hw_watchpoint, t);
633 INHERIT (to_terminal_init, t);
634 INHERIT (to_terminal_inferior, t);
635 INHERIT (to_terminal_ours_for_output, t);
636 INHERIT (to_terminal_ours, t);
637 INHERIT (to_terminal_save_ours, t);
638 INHERIT (to_terminal_info, t);
639 /* Do not inherit to_kill. */
640 INHERIT (to_load, t);
641 INHERIT (to_lookup_symbol, t);
642 /* Do not inherit to_create_inferior. */
643 INHERIT (to_post_startup_inferior, t);
644 INHERIT (to_acknowledge_created_inferior, t);
645 INHERIT (to_insert_fork_catchpoint, t);
646 INHERIT (to_remove_fork_catchpoint, t);
647 INHERIT (to_insert_vfork_catchpoint, t);
648 INHERIT (to_remove_vfork_catchpoint, t);
649 /* Do not inherit to_follow_fork. */
650 INHERIT (to_insert_exec_catchpoint, t);
651 INHERIT (to_remove_exec_catchpoint, t);
652 INHERIT (to_set_syscall_catchpoint, t);
653 INHERIT (to_has_exited, t);
654 /* Do not inherit to_mourn_inferior. */
655 INHERIT (to_can_run, t);
656 INHERIT (to_notice_signals, t);
657 /* Do not inherit to_thread_alive. */
658 /* Do not inherit to_find_new_threads. */
659 /* Do not inherit to_pid_to_str. */
660 INHERIT (to_extra_thread_info, t);
661 INHERIT (to_stop, t);
662 /* Do not inherit to_xfer_partial. */
663 INHERIT (to_rcmd, t);
664 INHERIT (to_pid_to_exec_file, t);
665 INHERIT (to_log_command, t);
666 INHERIT (to_stratum, t);
667 /* Do not inherit to_has_all_memory */
668 /* Do not inherit to_has_memory */
669 /* Do not inherit to_has_stack */
670 /* Do not inherit to_has_registers */
671 /* Do not inherit to_has_execution */
672 INHERIT (to_has_thread_control, t);
673 INHERIT (to_can_async_p, t);
674 INHERIT (to_is_async_p, t);
675 INHERIT (to_async, t);
676 INHERIT (to_async_mask, t);
677 INHERIT (to_find_memory_regions, t);
678 INHERIT (to_make_corefile_notes, t);
679 INHERIT (to_get_bookmark, t);
680 INHERIT (to_goto_bookmark, t);
681 /* Do not inherit to_get_thread_local_address. */
682 INHERIT (to_can_execute_reverse, t);
683 INHERIT (to_thread_architecture, t);
684 /* Do not inherit to_read_description. */
685 INHERIT (to_get_ada_task_ptid, t);
686 /* Do not inherit to_search_memory. */
687 INHERIT (to_supports_multi_process, t);
688 INHERIT (to_trace_init, t);
689 INHERIT (to_download_tracepoint, t);
690 INHERIT (to_download_trace_state_variable, t);
691 INHERIT (to_trace_set_readonly_regions, t);
692 INHERIT (to_trace_start, t);
693 INHERIT (to_get_trace_status, t);
694 INHERIT (to_trace_stop, t);
695 INHERIT (to_trace_find, t);
696 INHERIT (to_get_trace_state_variable_value, t);
697 INHERIT (to_set_disconnected_tracing, t);
698 INHERIT (to_magic, t);
699 /* Do not inherit to_memory_map. */
700 /* Do not inherit to_flash_erase. */
701 /* Do not inherit to_flash_done. */
702 }
703 #undef INHERIT
704
705 /* Clean up a target struct so it no longer has any zero pointers in
706 it. Some entries are defaulted to a method that prints an error,
707 others are hard-wired to a standard recursive default. */
708
709 #define de_fault(field, value) \
710 if (!current_target.field) \
711 current_target.field = value
712
713 de_fault (to_open,
714 (void (*) (char *, int))
715 tcomplain);
716 de_fault (to_close,
717 (void (*) (int))
718 target_ignore);
719 de_fault (to_post_attach,
720 (void (*) (int))
721 target_ignore);
722 de_fault (to_prepare_to_store,
723 (void (*) (struct regcache *))
724 noprocess);
725 de_fault (deprecated_xfer_memory,
726 (int (*) (CORE_ADDR, gdb_byte *, int, int, struct mem_attrib *, struct target_ops *))
727 nomemory);
728 de_fault (to_files_info,
729 (void (*) (struct target_ops *))
730 target_ignore);
731 de_fault (to_insert_breakpoint,
732 memory_insert_breakpoint);
733 de_fault (to_remove_breakpoint,
734 memory_remove_breakpoint);
735 de_fault (to_can_use_hw_breakpoint,
736 (int (*) (int, int, int))
737 return_zero);
738 de_fault (to_insert_hw_breakpoint,
739 (int (*) (struct gdbarch *, struct bp_target_info *))
740 return_minus_one);
741 de_fault (to_remove_hw_breakpoint,
742 (int (*) (struct gdbarch *, struct bp_target_info *))
743 return_minus_one);
744 de_fault (to_insert_watchpoint,
745 (int (*) (CORE_ADDR, int, int))
746 return_minus_one);
747 de_fault (to_remove_watchpoint,
748 (int (*) (CORE_ADDR, int, int))
749 return_minus_one);
750 de_fault (to_stopped_by_watchpoint,
751 (int (*) (void))
752 return_zero);
753 de_fault (to_stopped_data_address,
754 (int (*) (struct target_ops *, CORE_ADDR *))
755 return_zero);
756 de_fault (to_watchpoint_addr_within_range,
757 default_watchpoint_addr_within_range);
758 de_fault (to_region_ok_for_hw_watchpoint,
759 default_region_ok_for_hw_watchpoint);
760 de_fault (to_terminal_init,
761 (void (*) (void))
762 target_ignore);
763 de_fault (to_terminal_inferior,
764 (void (*) (void))
765 target_ignore);
766 de_fault (to_terminal_ours_for_output,
767 (void (*) (void))
768 target_ignore);
769 de_fault (to_terminal_ours,
770 (void (*) (void))
771 target_ignore);
772 de_fault (to_terminal_save_ours,
773 (void (*) (void))
774 target_ignore);
775 de_fault (to_terminal_info,
776 default_terminal_info);
777 de_fault (to_load,
778 (void (*) (char *, int))
779 tcomplain);
780 de_fault (to_lookup_symbol,
781 (int (*) (char *, CORE_ADDR *))
782 nosymbol);
783 de_fault (to_post_startup_inferior,
784 (void (*) (ptid_t))
785 target_ignore);
786 de_fault (to_acknowledge_created_inferior,
787 (void (*) (int))
788 target_ignore);
789 de_fault (to_insert_fork_catchpoint,
790 (void (*) (int))
791 tcomplain);
792 de_fault (to_remove_fork_catchpoint,
793 (int (*) (int))
794 tcomplain);
795 de_fault (to_insert_vfork_catchpoint,
796 (void (*) (int))
797 tcomplain);
798 de_fault (to_remove_vfork_catchpoint,
799 (int (*) (int))
800 tcomplain);
801 de_fault (to_insert_exec_catchpoint,
802 (void (*) (int))
803 tcomplain);
804 de_fault (to_remove_exec_catchpoint,
805 (int (*) (int))
806 tcomplain);
807 de_fault (to_set_syscall_catchpoint,
808 (int (*) (int, int, int, int, int *))
809 tcomplain);
810 de_fault (to_has_exited,
811 (int (*) (int, int, int *))
812 return_zero);
813 de_fault (to_can_run,
814 return_zero);
815 de_fault (to_notice_signals,
816 (void (*) (ptid_t))
817 target_ignore);
818 de_fault (to_extra_thread_info,
819 (char *(*) (struct thread_info *))
820 return_zero);
821 de_fault (to_stop,
822 (void (*) (ptid_t))
823 target_ignore);
824 current_target.to_xfer_partial = current_xfer_partial;
825 de_fault (to_rcmd,
826 (void (*) (char *, struct ui_file *))
827 tcomplain);
828 de_fault (to_pid_to_exec_file,
829 (char *(*) (int))
830 return_zero);
831 de_fault (to_async,
832 (void (*) (void (*) (enum inferior_event_type, void*), void*))
833 tcomplain);
834 de_fault (to_async_mask,
835 (int (*) (int))
836 return_one);
837 de_fault (to_thread_architecture,
838 default_thread_architecture);
839 current_target.to_read_description = NULL;
840 de_fault (to_get_ada_task_ptid,
841 (ptid_t (*) (long, long))
842 default_get_ada_task_ptid);
843 de_fault (to_supports_multi_process,
844 (int (*) (void))
845 return_zero);
846 de_fault (to_trace_init,
847 (void (*) (void))
848 tcomplain);
849 de_fault (to_download_tracepoint,
850 (void (*) (struct breakpoint *))
851 tcomplain);
852 de_fault (to_download_trace_state_variable,
853 (void (*) (struct trace_state_variable *))
854 tcomplain);
855 de_fault (to_trace_set_readonly_regions,
856 (void (*) (void))
857 tcomplain);
858 de_fault (to_trace_start,
859 (void (*) (void))
860 tcomplain);
861 de_fault (to_get_trace_status,
862 (int (*) (int *))
863 return_minus_one);
864 de_fault (to_trace_stop,
865 (void (*) (void))
866 tcomplain);
867 de_fault (to_trace_find,
868 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
869 return_zero);
870 de_fault (to_get_trace_state_variable_value,
871 (int (*) (int, LONGEST *))
872 return_zero);
873 de_fault (to_set_disconnected_tracing,
874 (void (*) (int))
875 tcomplain);
876 #undef de_fault
877
878 /* Finally, position the target-stack beneath the squashed
879 "current_target". That way code looking for a non-inherited
880 target method can quickly and simply find it. */
881 current_target.beneath = target_stack;
882
883 if (targetdebug)
884 setup_target_debug ();
885 }
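
/* Standalone miniature of the INHERIT/de_fault pattern above, not part of
   the GDB sources: walk a stack of method tables from the top, keep the
   first non-NULL implementation of each slot, then fill anything still
   missing with a default.  Compiles on its own.  */

#include <stdio.h>
#include <stddef.h>

struct mini_ops { const char *name; void (*hello) (void); };

static void top_hello (void) { puts ("hello from the top target"); }
static void default_hello (void) { puts ("hello from the default"); }

static void
mini_update_current (struct mini_ops *current, struct mini_ops **stack, size_t n)
{
  size_t i;

  current->name = NULL;
  current->hello = NULL;
  for (i = 0; i < n; i++)           /* like the INHERIT loop */
    {
      if (!current->name)
        current->name = stack[i]->name;
      if (!current->hello)
        current->hello = stack[i]->hello;
    }
  if (!current->hello)              /* like de_fault */
    current->hello = default_hello;
}

int
main (void)
{
  struct mini_ops top = { "top", top_hello };
  struct mini_ops bottom = { "bottom", NULL };
  struct mini_ops *stack[] = { &top, &bottom };
  struct mini_ops current;

  mini_update_current (&current, stack, 2);
  printf ("%s: ", current.name);
  current.hello ();                 /* prints "top: hello from the top target" */
  return 0;
}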
886
887 /* Push a new target type into the stack of the existing target accessors,
888 possibly superseding some of the existing accessors.
889
890 Result is zero if the pushed target ended up on top of the stack,
891 nonzero if at least one target is on top of it.
892
893 Rather than allow an empty stack, we always have the dummy target at
894 the bottom stratum, so we can call the function vectors without
895 checking them. */
896
897 int
898 push_target (struct target_ops *t)
899 {
900 struct target_ops **cur;
901
902 /* Check magic number. If wrong, it probably means someone changed
903 the struct definition, but not all the places that initialize one. */
904 if (t->to_magic != OPS_MAGIC)
905 {
906 fprintf_unfiltered (gdb_stderr,
907 "Magic number of %s target struct wrong\n",
908 t->to_shortname);
909 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
910 }
911
912 /* Find the proper stratum to install this target in. */
913 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
914 {
915 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
916 break;
917 }
918
919 /* If there's already targets at this stratum, remove them. */
920 /* FIXME: cagney/2003-10-15: I think this should be popping all
921 targets to CUR, and not just those at this stratum level. */
922 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
923 {
924 /* There's already something at this stratum level. Close it,
925 and un-hook it from the stack. */
926 struct target_ops *tmp = (*cur);
927 (*cur) = (*cur)->beneath;
928 tmp->beneath = NULL;
929 target_close (tmp, 0);
930 }
931
932 /* We have removed all targets in our stratum, now add the new one. */
933 t->beneath = (*cur);
934 (*cur) = t;
935
936 update_current_target ();
937
938 /* Not on top? */
939 return (t != target_stack);
940 }
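
/* Standalone sketch of the stratum-ordered insertion done by push_target,
   not part of the GDB sources: walk down until a stratum less than or
   equal to the new one is found, unhook anything already at that stratum,
   and splice the new entry in.  The strata values here are made up.  */

#include <stdio.h>

struct mini_target { int stratum; const char *name; struct mini_target *beneath; };

static struct mini_target *mini_stack;

static int
mini_push (struct mini_target *t)
{
  struct mini_target **cur;

  for (cur = &mini_stack; *cur != NULL; cur = &(*cur)->beneath)
    if (t->stratum >= (*cur)->stratum)
      break;

  while (*cur != NULL && t->stratum == (*cur)->stratum)
    *cur = (*cur)->beneath;         /* close and unhook the old one */

  t->beneath = *cur;
  *cur = t;
  return t != mini_stack;           /* nonzero if something sits above it */
}

int
main (void)
{
  static struct mini_target dummy = { 0, "dummy", NULL };
  static struct mini_target exec = { 1, "exec", NULL };
  static struct mini_target proc = { 2, "process", NULL };
  struct mini_target *p;

  mini_push (&dummy);
  mini_push (&proc);
  mini_push (&exec);                /* lands beneath "process" */
  for (p = mini_stack; p != NULL; p = p->beneath)
    printf ("%s\n", p->name);       /* process, exec, dummy */
  return 0;
}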
941
942 /* Remove a target_ops vector from the stack, wherever it may be.
943 Return how many times it was removed (0 or 1). */
944
945 int
946 unpush_target (struct target_ops *t)
947 {
948 struct target_ops **cur;
949 struct target_ops *tmp;
950
951 if (t->to_stratum == dummy_stratum)
952 internal_error (__FILE__, __LINE__,
953 "Attempt to unpush the dummy target");
954
955 /* Look for the specified target. Note that we assume that a target
956 can only occur once in the target stack. */
957
958 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
959 {
960 if ((*cur) == t)
961 break;
962 }
963
964 if ((*cur) == NULL)
965 return 0; /* Didn't find target_ops, quit now */
966
967 /* NOTE: cagney/2003-12-06: In '94 the close call was made
968 unconditional by moving it to before the above check that the
969 target was in the target stack (something about "Change the way
970 pushing and popping of targets work to support target overlays
971 and inheritance"). This doesn't make much sense - only open
972 targets should be closed. */
973 target_close (t, 0);
974
975 /* Unchain the target */
976 tmp = (*cur);
977 (*cur) = (*cur)->beneath;
978 tmp->beneath = NULL;
979
980 update_current_target ();
981
982 return 1;
983 }
984
985 void
986 pop_target (void)
987 {
988 target_close (target_stack, 0); /* Let it clean up */
989 if (unpush_target (target_stack) == 1)
990 return;
991
992 fprintf_unfiltered (gdb_stderr,
993 "pop_target couldn't find target %s\n",
994 current_target.to_shortname);
995 internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
996 }
997
998 void
999 pop_all_targets_above (enum strata above_stratum, int quitting)
1000 {
1001 while ((int) (current_target.to_stratum) > (int) above_stratum)
1002 {
1003 target_close (target_stack, quitting);
1004 if (!unpush_target (target_stack))
1005 {
1006 fprintf_unfiltered (gdb_stderr,
1007 "pop_all_targets couldn't find target %s\n",
1008 target_stack->to_shortname);
1009 internal_error (__FILE__, __LINE__,
1010 _("failed internal consistency check"));
1011 break;
1012 }
1013 }
1014 }
1015
1016 void
1017 pop_all_targets (int quitting)
1018 {
1019 pop_all_targets_above (dummy_stratum, quitting);
1020 }
1021
1022 /* Using the objfile specified in OBJFILE, find the address for the
1023 current thread's thread-local storage with offset OFFSET. */
1024 CORE_ADDR
1025 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1026 {
1027 volatile CORE_ADDR addr = 0;
1028 struct target_ops *target;
1029
1030 for (target = current_target.beneath;
1031 target != NULL;
1032 target = target->beneath)
1033 {
1034 if (target->to_get_thread_local_address != NULL)
1035 break;
1036 }
1037
1038 if (target != NULL
1039 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
1040 {
1041 ptid_t ptid = inferior_ptid;
1042 volatile struct gdb_exception ex;
1043
1044 TRY_CATCH (ex, RETURN_MASK_ALL)
1045 {
1046 CORE_ADDR lm_addr;
1047
1048 /* Fetch the load module address for this objfile. */
1049 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1050 objfile);
1051 /* If it's 0, throw the appropriate exception. */
1052 if (lm_addr == 0)
1053 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1054 _("TLS load module not found"));
1055
1056 addr = target->to_get_thread_local_address (target, ptid, lm_addr, offset);
1057 }
1058 /* If an error occurred, print TLS related messages here. Otherwise,
1059 throw the error to some higher catcher. */
1060 if (ex.reason < 0)
1061 {
1062 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1063
1064 switch (ex.error)
1065 {
1066 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1067 error (_("Cannot find thread-local variables in this thread library."));
1068 break;
1069 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1070 if (objfile_is_library)
1071 error (_("Cannot find shared library `%s' in dynamic"
1072 " linker's load module list"), objfile->name);
1073 else
1074 error (_("Cannot find executable file `%s' in dynamic"
1075 " linker's load module list"), objfile->name);
1076 break;
1077 case TLS_NOT_ALLOCATED_YET_ERROR:
1078 if (objfile_is_library)
1079 error (_("The inferior has not yet allocated storage for"
1080 " thread-local variables in\n"
1081 "the shared library `%s'\n"
1082 "for %s"),
1083 objfile->name, target_pid_to_str (ptid));
1084 else
1085 error (_("The inferior has not yet allocated storage for"
1086 " thread-local variables in\n"
1087 "the executable `%s'\n"
1088 "for %s"),
1089 objfile->name, target_pid_to_str (ptid));
1090 break;
1091 case TLS_GENERIC_ERROR:
1092 if (objfile_is_library)
1093 error (_("Cannot find thread-local storage for %s, "
1094 "shared library %s:\n%s"),
1095 target_pid_to_str (ptid),
1096 objfile->name, ex.message);
1097 else
1098 error (_("Cannot find thread-local storage for %s, "
1099 "executable file %s:\n%s"),
1100 target_pid_to_str (ptid),
1101 objfile->name, ex.message);
1102 break;
1103 default:
1104 throw_exception (ex);
1105 break;
1106 }
1107 }
1108 }
1109 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1110 TLS is an ABI-specific thing. But we don't do that yet. */
1111 else
1112 error (_("Cannot find thread-local variables on this target"));
1113
1114 return addr;
1115 }
1116
1117 #undef MIN
1118 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1119
1120 /* target_read_string -- read a null terminated string, up to LEN bytes,
1121 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1122 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1123 is responsible for freeing it. Return the number of bytes successfully
1124 read. */
1125
1126 int
1127 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1128 {
1129 int tlen, origlen, offset, i;
1130 gdb_byte buf[4];
1131 int errcode = 0;
1132 char *buffer;
1133 int buffer_allocated;
1134 char *bufptr;
1135 unsigned int nbytes_read = 0;
1136
1137 gdb_assert (string);
1138
1139 /* Small for testing. */
1140 buffer_allocated = 4;
1141 buffer = xmalloc (buffer_allocated);
1142 bufptr = buffer;
1143
1144 origlen = len;
1145
1146 while (len > 0)
1147 {
1148 tlen = MIN (len, 4 - (memaddr & 3));
1149 offset = memaddr & 3;
1150
1151 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1152 if (errcode != 0)
1153 {
1154 /* The transfer request might have crossed the boundary to an
1155 unallocated region of memory. Retry the transfer, requesting
1156 a single byte. */
1157 tlen = 1;
1158 offset = 0;
1159 errcode = target_read_memory (memaddr, buf, 1);
1160 if (errcode != 0)
1161 goto done;
1162 }
1163
1164 if (bufptr - buffer + tlen > buffer_allocated)
1165 {
1166 unsigned int bytes;
1167 bytes = bufptr - buffer;
1168 buffer_allocated *= 2;
1169 buffer = xrealloc (buffer, buffer_allocated);
1170 bufptr = buffer + bytes;
1171 }
1172
1173 for (i = 0; i < tlen; i++)
1174 {
1175 *bufptr++ = buf[i + offset];
1176 if (buf[i + offset] == '\000')
1177 {
1178 nbytes_read += i + 1;
1179 goto done;
1180 }
1181 }
1182
1183 memaddr += tlen;
1184 len -= tlen;
1185 nbytes_read += tlen;
1186 }
1187 done:
1188 *string = buffer;
1189 if (errnop != NULL)
1190 *errnop = errcode;
1191 return nbytes_read;
1192 }
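
/* Minimal usage sketch for target_read_string, not part of the GDB
   sources.  The caller always owns and frees the returned buffer; the
   200-byte limit is arbitrary and print_inferior_string is hypothetical.  */

static void
print_inferior_string (CORE_ADDR addr)
{
  char *str;
  int err;
  int nread = target_read_string (addr, &str, 200, &err);

  if (nread > 0)
    printf_filtered ("%.*s\n", nread, str);   /* print at most NREAD bytes */
  else
    printf_filtered (_("<unreadable string, errno %d>\n"), err);
  xfree (str);
}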
1193
1194 struct target_section_table *
1195 target_get_section_table (struct target_ops *target)
1196 {
1197 struct target_ops *t;
1198
1199 if (targetdebug)
1200 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1201
1202 for (t = target; t != NULL; t = t->beneath)
1203 if (t->to_get_section_table != NULL)
1204 return (*t->to_get_section_table) (t);
1205
1206 return NULL;
1207 }
1208
1209 /* Find a section containing ADDR. */
1210
1211 struct target_section *
1212 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1213 {
1214 struct target_section_table *table = target_get_section_table (target);
1215 struct target_section *secp;
1216
1217 if (table == NULL)
1218 return NULL;
1219
1220 for (secp = table->sections; secp < table->sections_end; secp++)
1221 {
1222 if (addr >= secp->addr && addr < secp->endaddr)
1223 return secp;
1224 }
1225 return NULL;
1226 }
1227
1228 /* Perform a partial memory transfer.
1229 For docs see target.h, to_xfer_partial. */
1230
1231 static LONGEST
1232 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1233 void *readbuf, const void *writebuf, ULONGEST memaddr,
1234 LONGEST len)
1235 {
1236 LONGEST res;
1237 int reg_len;
1238 struct mem_region *region;
1239 struct inferior *inf;
1240
1241 /* Zero length requests are ok and require no work. */
1242 if (len == 0)
1243 return 0;
1244
1245 /* For accesses to unmapped overlay sections, read directly from
1246 files. Must do this first, as MEMADDR may need adjustment. */
1247 if (readbuf != NULL && overlay_debugging)
1248 {
1249 struct obj_section *section = find_pc_overlay (memaddr);
1250 if (pc_in_unmapped_range (memaddr, section))
1251 {
1252 struct target_section_table *table
1253 = target_get_section_table (ops);
1254 const char *section_name = section->the_bfd_section->name;
1255 memaddr = overlay_mapped_address (memaddr, section);
1256 return section_table_xfer_memory_partial (readbuf, writebuf,
1257 memaddr, len,
1258 table->sections,
1259 table->sections_end,
1260 section_name);
1261 }
1262 }
1263
1264 /* Try the executable files, if "trust-readonly-sections" is set. */
1265 if (readbuf != NULL && trust_readonly)
1266 {
1267 struct target_section *secp;
1268 struct target_section_table *table;
1269
1270 secp = target_section_by_addr (ops, memaddr);
1271 if (secp != NULL
1272 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1273 & SEC_READONLY))
1274 {
1275 table = target_get_section_table (ops);
1276 return section_table_xfer_memory_partial (readbuf, writebuf,
1277 memaddr, len,
1278 table->sections,
1279 table->sections_end,
1280 NULL);
1281 }
1282 }
1283
1284 /* Try GDB's internal data cache. */
1285 region = lookup_mem_region (memaddr);
1286 /* region->hi == 0 means there's no upper bound. */
1287 if (memaddr + len < region->hi || region->hi == 0)
1288 reg_len = len;
1289 else
1290 reg_len = region->hi - memaddr;
1291
1292 switch (region->attrib.mode)
1293 {
1294 case MEM_RO:
1295 if (writebuf != NULL)
1296 return -1;
1297 break;
1298
1299 case MEM_WO:
1300 if (readbuf != NULL)
1301 return -1;
1302 break;
1303
1304 case MEM_FLASH:
1305 /* We only support writing to flash during "load" for now. */
1306 if (writebuf != NULL)
1307 error (_("Writing to flash memory forbidden in this context"));
1308 break;
1309
1310 case MEM_NONE:
1311 return -1;
1312 }
1313
1314 if (!ptid_equal (inferior_ptid, null_ptid))
1315 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1316 else
1317 inf = NULL;
1318
1319 if (inf != NULL
1320 && (region->attrib.cache
1321 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1322 {
1323 if (readbuf != NULL)
1324 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1325 reg_len, 0);
1326 else
1327 /* FIXME drow/2006-08-09: If we're going to preserve const
1328 correctness dcache_xfer_memory should take readbuf and
1329 writebuf. */
1330 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1331 (void *) writebuf,
1332 reg_len, 1);
1333 if (res <= 0)
1334 return -1;
1335 else
1336 {
1337 if (readbuf && !show_memory_breakpoints)
1338 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1339 return res;
1340 }
1341 }
1342
1343 /* If none of those methods found the memory we wanted, fall back
1344 to a target partial transfer. Normally a single call to
1345 to_xfer_partial is enough; if it doesn't recognize an object
1346 it will call the to_xfer_partial of the next target down.
1347 But for memory this won't do. Memory is the only target
1348 object which can be read from more than one valid target.
1349 A core file, for instance, could have some of memory but
1350 delegate other bits to the target below it. So, we must
1351 manually try all targets. */
1352
1353 do
1354 {
1355 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1356 readbuf, writebuf, memaddr, reg_len);
1357 if (res > 0)
1358 break;
1359
1360 /* We want to continue past core files to executables, but not
1361 past a running target's memory. */
1362 if (ops->to_has_all_memory (ops))
1363 break;
1364
1365 ops = ops->beneath;
1366 }
1367 while (ops != NULL);
1368
1369 if (readbuf && !show_memory_breakpoints)
1370 breakpoint_restore_shadows (readbuf, memaddr, reg_len);
1371
1372 /* Make sure the cache gets updated no matter what - if we are writing
1373 to the stack. Even if this write is not tagged as such, we still need
1374 to update the cache. */
1375
1376 if (res > 0
1377 && inf != NULL
1378 && writebuf != NULL
1379 && !region->attrib.cache
1380 && stack_cache_enabled_p
1381 && object != TARGET_OBJECT_STACK_MEMORY)
1382 {
1383 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1384 }
1385
1386 /* If we still haven't got anything, return the last error. We
1387 give up. */
1388 return res;
1389 }
1390
1391 static void
1392 restore_show_memory_breakpoints (void *arg)
1393 {
1394 show_memory_breakpoints = (uintptr_t) arg;
1395 }
1396
1397 struct cleanup *
1398 make_show_memory_breakpoints_cleanup (int show)
1399 {
1400 int current = show_memory_breakpoints;
1401 show_memory_breakpoints = show;
1402
1403 return make_cleanup (restore_show_memory_breakpoints,
1404 (void *) (uintptr_t) current);
1405 }
1406
1407 /* For docs see target.h, to_xfer_partial. */
1408
1409 static LONGEST
1410 target_xfer_partial (struct target_ops *ops,
1411 enum target_object object, const char *annex,
1412 void *readbuf, const void *writebuf,
1413 ULONGEST offset, LONGEST len)
1414 {
1415 LONGEST retval;
1416
1417 gdb_assert (ops->to_xfer_partial != NULL);
1418
1419 /* If this is a memory transfer, let the memory-specific code
1420 have a look at it instead. Memory transfers are more
1421 complicated. */
1422 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1423 retval = memory_xfer_partial (ops, object, readbuf,
1424 writebuf, offset, len);
1425 else
1426 {
1427 enum target_object raw_object = object;
1428
1429 /* If this is a raw memory transfer, request the normal
1430 memory object from other layers. */
1431 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1432 raw_object = TARGET_OBJECT_MEMORY;
1433
1434 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1435 writebuf, offset, len);
1436 }
1437
1438 if (targetdebug)
1439 {
1440 const unsigned char *myaddr = NULL;
1441
1442 fprintf_unfiltered (gdb_stdlog,
1443 "%s:target_xfer_partial (%d, %s, %s, %s, %s, %s) = %s",
1444 ops->to_shortname,
1445 (int) object,
1446 (annex ? annex : "(null)"),
1447 host_address_to_string (readbuf),
1448 host_address_to_string (writebuf),
1449 core_addr_to_string_nz (offset),
1450 plongest (len), plongest (retval));
1451
1452 if (readbuf)
1453 myaddr = readbuf;
1454 if (writebuf)
1455 myaddr = writebuf;
1456 if (retval > 0 && myaddr != NULL)
1457 {
1458 int i;
1459
1460 fputs_unfiltered (", bytes =", gdb_stdlog);
1461 for (i = 0; i < retval; i++)
1462 {
1463 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1464 {
1465 if (targetdebug < 2 && i > 0)
1466 {
1467 fprintf_unfiltered (gdb_stdlog, " ...");
1468 break;
1469 }
1470 fprintf_unfiltered (gdb_stdlog, "\n");
1471 }
1472
1473 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1474 }
1475 }
1476
1477 fputc_unfiltered ('\n', gdb_stdlog);
1478 }
1479 return retval;
1480 }
1481
1482 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1483 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1484 if any error occurs.
1485
1486 If an error occurs, no guarantee is made about the contents of the data at
1487 MYADDR. In particular, the caller should not depend upon partial reads
1488 filling the buffer with good data. There is no way for the caller to know
1489 how much good data might have been transferred anyway. Callers that can
1490 deal with partial reads should call target_read (which will retry until
1491 it makes no progress, and then return how much was transferred). */
1492
1493 int
1494 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1495 {
1496 /* Dispatch to the topmost target, not the flattened current_target.
1497 Memory accesses check target->to_has_(all_)memory, and the
1498 flattened target doesn't inherit those. */
1499 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1500 myaddr, memaddr, len) == len)
1501 return 0;
1502 else
1503 return EIO;
1504 }
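
/* Minimal usage sketch, not part of the GDB sources: a typical caller
   reads raw bytes with target_read_memory and then decodes them for the
   target's byte order.  The 8-byte width and read_example_word are
   hypothetical.  */

static ULONGEST
read_example_word (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  if (target_read_memory (addr, buf, 8) != 0)
    error (_("Cannot read word at %s"), paddress (gdbarch, addr));

  return extract_unsigned_integer (buf, 8, byte_order);
}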
1505
1506 /* Like target_read_memory, but specify explicitly that this is a read from
1507 the target's stack. This may trigger different cache behavior. */
1508
1509 int
1510 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1511 {
1512 /* Dispatch to the topmost target, not the flattened current_target.
1513 Memory accesses check target->to_has_(all_)memory, and the
1514 flattened target doesn't inherit those. */
1515
1516 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1517 myaddr, memaddr, len) == len)
1518 return 0;
1519 else
1520 return EIO;
1521 }
1522
1523 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1524 Returns either 0 for success or an errno value if any error occurs.
1525 If an error occurs, no guarantee is made about how much data got written.
1526 Callers that can deal with partial writes should call target_write. */
1527
1528 int
1529 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1530 {
1531 /* Dispatch to the topmost target, not the flattened current_target.
1532 Memory accesses check target->to_has_(all_)memory, and the
1533 flattened target doesn't inherit those. */
1534 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1535 myaddr, memaddr, len) == len)
1536 return 0;
1537 else
1538 return EIO;
1539 }
1540
1541 /* Fetch the target's memory map. */
1542
1543 VEC(mem_region_s) *
1544 target_memory_map (void)
1545 {
1546 VEC(mem_region_s) *result;
1547 struct mem_region *last_one, *this_one;
1548 int ix;
1549 struct target_ops *t;
1550
1551 if (targetdebug)
1552 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1553
1554 for (t = current_target.beneath; t != NULL; t = t->beneath)
1555 if (t->to_memory_map != NULL)
1556 break;
1557
1558 if (t == NULL)
1559 return NULL;
1560
1561 result = t->to_memory_map (t);
1562 if (result == NULL)
1563 return NULL;
1564
1565 qsort (VEC_address (mem_region_s, result),
1566 VEC_length (mem_region_s, result),
1567 sizeof (struct mem_region), mem_region_cmp);
1568
1569 /* Check that regions do not overlap. Simultaneously assign
1570 a numbering for the "mem" commands to use to refer to
1571 each region. */
1572 last_one = NULL;
1573 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1574 {
1575 this_one->number = ix;
1576
1577 if (last_one && last_one->hi > this_one->lo)
1578 {
1579 warning (_("Overlapping regions in memory map: ignoring"));
1580 VEC_free (mem_region_s, result);
1581 return NULL;
1582 }
1583 last_one = this_one;
1584 }
1585
1586 return result;
1587 }
1588
1589 void
1590 target_flash_erase (ULONGEST address, LONGEST length)
1591 {
1592 struct target_ops *t;
1593
1594 for (t = current_target.beneath; t != NULL; t = t->beneath)
1595 if (t->to_flash_erase != NULL)
1596 {
1597 if (targetdebug)
1598 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1599 hex_string (address), phex (length, 0));
1600 t->to_flash_erase (t, address, length);
1601 return;
1602 }
1603
1604 tcomplain ();
1605 }
1606
1607 void
1608 target_flash_done (void)
1609 {
1610 struct target_ops *t;
1611
1612 for (t = current_target.beneath; t != NULL; t = t->beneath)
1613 if (t->to_flash_done != NULL)
1614 {
1615 if (targetdebug)
1616 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1617 t->to_flash_done (t);
1618 return;
1619 }
1620
1621 tcomplain ();
1622 }
1623
1624 static void
1625 show_trust_readonly (struct ui_file *file, int from_tty,
1626 struct cmd_list_element *c, const char *value)
1627 {
1628 fprintf_filtered (file, _("\
1629 Mode for reading from readonly sections is %s.\n"),
1630 value);
1631 }
1632
1633 /* More generic transfers. */
1634
1635 static LONGEST
1636 default_xfer_partial (struct target_ops *ops, enum target_object object,
1637 const char *annex, gdb_byte *readbuf,
1638 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1639 {
1640 if (object == TARGET_OBJECT_MEMORY
1641 && ops->deprecated_xfer_memory != NULL)
1642 /* If available, fall back to the target's
1643 "deprecated_xfer_memory" method. */
1644 {
1645 int xfered = -1;
1646 errno = 0;
1647 if (writebuf != NULL)
1648 {
1649 void *buffer = xmalloc (len);
1650 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1651 memcpy (buffer, writebuf, len);
1652 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1653 1/*write*/, NULL, ops);
1654 do_cleanups (cleanup);
1655 }
1656 if (readbuf != NULL)
1657 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1658 0/*read*/, NULL, ops);
1659 if (xfered > 0)
1660 return xfered;
1661 else if (xfered == 0 && errno == 0)
1662 /* "deprecated_xfer_memory" uses 0, cross checked against
1663 ERRNO as one indication of an error. */
1664 return 0;
1665 else
1666 return -1;
1667 }
1668 else if (ops->beneath != NULL)
1669 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1670 readbuf, writebuf, offset, len);
1671 else
1672 return -1;
1673 }
1674
1675 /* The xfer_partial handler for the topmost target. Unlike the default,
1676 it does not need to handle memory specially; it just passes all
1677 requests down the stack. */
1678
1679 static LONGEST
1680 current_xfer_partial (struct target_ops *ops, enum target_object object,
1681 const char *annex, gdb_byte *readbuf,
1682 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1683 {
1684 if (ops->beneath != NULL)
1685 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1686 readbuf, writebuf, offset, len);
1687 else
1688 return -1;
1689 }
1690
1691 /* Target vector read/write partial wrapper functions. */
1692
1693 static LONGEST
1694 target_read_partial (struct target_ops *ops,
1695 enum target_object object,
1696 const char *annex, gdb_byte *buf,
1697 ULONGEST offset, LONGEST len)
1698 {
1699 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1700 }
1701
1702 static LONGEST
1703 target_write_partial (struct target_ops *ops,
1704 enum target_object object,
1705 const char *annex, const gdb_byte *buf,
1706 ULONGEST offset, LONGEST len)
1707 {
1708 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1709 }
1710
1711 /* Wrappers to perform the full transfer. */
1712
1713 /* For docs on target_read see target.h. */
1714
1715 LONGEST
1716 target_read (struct target_ops *ops,
1717 enum target_object object,
1718 const char *annex, gdb_byte *buf,
1719 ULONGEST offset, LONGEST len)
1720 {
1721 LONGEST xfered = 0;
1722 while (xfered < len)
1723 {
1724 LONGEST xfer = target_read_partial (ops, object, annex,
1725 (gdb_byte *) buf + xfered,
1726 offset + xfered, len - xfered);
1727 /* Call an observer, notifying them of the xfer progress? */
1728 if (xfer == 0)
1729 return xfered;
1730 if (xfer < 0)
1731 return -1;
1732 xfered += xfer;
1733 QUIT;
1734 }
1735 return len;
1736 }
1737
1738 LONGEST
1739 target_read_until_error (struct target_ops *ops,
1740 enum target_object object,
1741 const char *annex, gdb_byte *buf,
1742 ULONGEST offset, LONGEST len)
1743 {
1744 LONGEST xfered = 0;
1745 while (xfered < len)
1746 {
1747 LONGEST xfer = target_read_partial (ops, object, annex,
1748 (gdb_byte *) buf + xfered,
1749 offset + xfered, len - xfered);
1750 /* Call an observer, notifying them of the xfer progress? */
1751 if (xfer == 0)
1752 return xfered;
1753 if (xfer < 0)
1754 {
1755 /* We've got an error. Try to read in smaller blocks. */
1756 ULONGEST start = offset + xfered;
1757 ULONGEST remaining = len - xfered;
1758 ULONGEST half;
1759
1760 /* If an attempt was made to read a random memory address,
1761 it's likely that the very first byte is not accessible.
1762 Try reading the first byte, to avoid doing log N tries
1763 below. */
1764 xfer = target_read_partial (ops, object, annex,
1765 (gdb_byte *) buf + xfered, start, 1);
1766 if (xfer <= 0)
1767 return xfered;
1768 start += 1;
1769 remaining -= 1;
1770 half = remaining/2;
1771
1772 while (half > 0)
1773 {
1774 xfer = target_read_partial (ops, object, annex,
1775 (gdb_byte *) buf + xfered,
1776 start, half);
1777 if (xfer == 0)
1778 return xfered;
1779 if (xfer < 0)
1780 {
1781 remaining = half;
1782 }
1783 else
1784 {
1785 /* We have successfully read the first half. So, the
1786 error must be in the second half. Adjust start and
1787 remaining to point at the second half. */
1788 xfered += xfer;
1789 start += xfer;
1790 remaining -= xfer;
1791 }
1792 half = remaining/2;
1793 }
1794
1795 return xfered;
1796 }
1797 xfered += xfer;
1798 QUIT;
1799 }
1800 return len;
1801 }
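
/* Standalone illustration of the halving strategy above, not part of the
   GDB sources: after a partial read fails, probe one byte, then repeatedly
   try half of what is left, so the readable prefix is found in O(log N)
   probes rather than N.  The fake source below "faults" past a fixed
   boundary.  */

#include <stdio.h>
#include <string.h>

#define BOUNDARY 1000UL             /* first unreadable offset */

static long
fake_read_partial (unsigned char *buf, unsigned long offset, unsigned long len)
{
  if (offset + len > BOUNDARY)
    return -1;                      /* any unreadable byte fails the whole request */
  memset (buf + offset, 0xab, len);
  return (long) len;
}

static unsigned long
read_prefix (unsigned char *buf, unsigned long len)
{
  unsigned long done = 0;

  while (done < len)
    {
      long n = fake_read_partial (buf, done, len - done);

      if (n > 0)
        {
          done += (unsigned long) n;
          continue;
        }

      /* Error: probe a single byte, then bisect the remainder.  */
      if (fake_read_partial (buf, done, 1) <= 0)
        return done;
      done += 1;

      {
        unsigned long remaining = len - done;
        unsigned long half = remaining / 2;

        while (half > 0)
          {
            n = fake_read_partial (buf, done, half);
            if (n > 0)
              {
                done += (unsigned long) n;    /* this half was readable */
                remaining -= (unsigned long) n;
              }
            else
              remaining = half;               /* the error is inside this half */
            half = remaining / 2;
          }
      }
      return done;
    }
  return done;
}

int
main (void)
{
  static unsigned char buf[4096];

  printf ("readable prefix: %lu bytes\n", read_prefix (buf, sizeof buf));
  return 0;
}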
1802
1803 /* An alternative to target_write with progress callbacks. */
1804
1805 LONGEST
1806 target_write_with_progress (struct target_ops *ops,
1807 enum target_object object,
1808 const char *annex, const gdb_byte *buf,
1809 ULONGEST offset, LONGEST len,
1810 void (*progress) (ULONGEST, void *), void *baton)
1811 {
1812 LONGEST xfered = 0;
1813
1814 /* Give the progress callback a chance to set up. */
1815 if (progress)
1816 (*progress) (0, baton);
1817
1818 while (xfered < len)
1819 {
1820 LONGEST xfer = target_write_partial (ops, object, annex,
1821 (gdb_byte *) buf + xfered,
1822 offset + xfered, len - xfered);
1823
1824 if (xfer == 0)
1825 return xfered;
1826 if (xfer < 0)
1827 return -1;
1828
1829 if (progress)
1830 (*progress) (xfer, baton);
1831
1832 xfered += xfer;
1833 QUIT;
1834 }
1835 return len;
1836 }
1837
1838 /* For docs on target_write see target.h. */
1839
1840 LONGEST
1841 target_write (struct target_ops *ops,
1842 enum target_object object,
1843 const char *annex, const gdb_byte *buf,
1844 ULONGEST offset, LONGEST len)
1845 {
1846 return target_write_with_progress (ops, object, annex, buf, offset, len,
1847 NULL, NULL);
1848 }
1849
1850 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1851 the size of the transferred data. PADDING additional bytes are
1852 available in *BUF_P. This is a helper function for
1853 target_read_alloc; see the declaration of that function for more
1854 information. */
1855
1856 static LONGEST
1857 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1858 const char *annex, gdb_byte **buf_p, int padding)
1859 {
1860 size_t buf_alloc, buf_pos;
1861 gdb_byte *buf;
1862 LONGEST n;
1863
1864 /* This function does not have a length parameter; it reads the
1865 entire OBJECT. Also, it doesn't support objects fetched partly
1866 from one target and partly from another (in a different stratum,
1867 e.g. a core file and an executable). Both reasons make it
1868 unsuitable for reading memory. */
1869 gdb_assert (object != TARGET_OBJECT_MEMORY);
1870
1871 /* Start by reading up to 4K at a time. The target will throttle
1872 this number down if necessary. */
1873 buf_alloc = 4096;
1874 buf = xmalloc (buf_alloc);
1875 buf_pos = 0;
1876 while (1)
1877 {
1878 n = target_read_partial (ops, object, annex, &buf[buf_pos],
1879 buf_pos, buf_alloc - buf_pos - padding);
1880 if (n < 0)
1881 {
1882 /* An error occurred. */
1883 xfree (buf);
1884 return -1;
1885 }
1886 else if (n == 0)
1887 {
1888 /* Read all there was. */
1889 if (buf_pos == 0)
1890 xfree (buf);
1891 else
1892 *buf_p = buf;
1893 return buf_pos;
1894 }
1895
1896 buf_pos += n;
1897
1898 /* If the buffer is filling up, expand it. */
1899 if (buf_alloc < buf_pos * 2)
1900 {
1901 buf_alloc *= 2;
1902 buf = xrealloc (buf, buf_alloc);
1903 }
1904
1905 QUIT;
1906 }
1907 }
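
/* Standalone analog of the grow-by-doubling loop above, not part of the
   GDB sources: read a stream of unknown length into a single heap buffer,
   doubling the allocation whenever it is more than half full.  */

#include <stdio.h>
#include <stdlib.h>

static unsigned char *
read_all (FILE *f, size_t *size)
{
  size_t alloc = 4096, pos = 0;
  unsigned char *buf = malloc (alloc);

  if (buf == NULL)
    return NULL;

  for (;;)
    {
      size_t n = fread (buf + pos, 1, alloc - pos, f);

      pos += n;
      if (n == 0)                   /* EOF (or error): everything was read */
        break;
      if (alloc < pos * 2)          /* keep room for the next chunk */
        {
          unsigned char *tmp = realloc (buf, alloc * 2);

          if (tmp == NULL)
            {
              free (buf);
              return NULL;
            }
          buf = tmp;
          alloc *= 2;
        }
    }

  *size = pos;
  return buf;
}

int
main (void)
{
  size_t size;
  unsigned char *data = read_all (stdin, &size);

  if (data != NULL)
    printf ("read %zu bytes from stdin\n", size);
  free (data);
  return 0;
}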
1908
1909 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1910 the size of the transferred data. See the declaration in "target.h"
1911 for more information about the return value. */
1912
1913 LONGEST
1914 target_read_alloc (struct target_ops *ops, enum target_object object,
1915 const char *annex, gdb_byte **buf_p)
1916 {
1917 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1918 }
1919
1920 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1921 returned as a string, allocated using xmalloc. If an error occurs
1922 or the transfer is unsupported, NULL is returned. Empty objects
1923 are returned as allocated but empty strings. A warning is issued
1924 if the result contains any embedded NUL bytes. */
1925
1926 char *
1927 target_read_stralloc (struct target_ops *ops, enum target_object object,
1928 const char *annex)
1929 {
1930 gdb_byte *buffer;
1931 LONGEST transferred;
1932
1933 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1934
1935 if (transferred < 0)
1936 return NULL;
1937
1938 if (transferred == 0)
1939 return xstrdup ("");
1940
1941 buffer[transferred] = 0;
1942 if (strlen ((const char *) buffer) < transferred)
1943 warning (_("target object %d, annex %s, "
1944 "contained unexpected null characters"),
1945 (int) object, annex ? annex : "(none)");
1946
1947 return (char *) buffer;
1948 }
1949
1950 /* Memory transfer methods. */
1951
1952 void
1953 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1954 LONGEST len)
1955 {
1956 /* This method is used to read from an alternate, non-current
1957 target. This read must bypass the overlay support (as symbols
1958 don't match this target), and GDB's internal cache (wrong cache
1959 for this target). */
1960 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1961 != len)
1962 memory_error (EIO, addr);
1963 }
1964
1965 ULONGEST
1966 get_target_memory_unsigned (struct target_ops *ops,
1967 CORE_ADDR addr, int len, enum bfd_endian byte_order)
1968 {
1969 gdb_byte buf[sizeof (ULONGEST)];
1970
1971 gdb_assert (len <= sizeof (buf));
1972 get_target_memory (ops, addr, buf, len);
1973 return extract_unsigned_integer (buf, len, byte_order);
1974 }
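
/* Sketch only (the function below is hypothetical): read a 32-bit
   field from an alternate target, such as a core file sitting beneath
   the current one, bypassing GDB's caches and overlay handling as
   described above.  */

static ULONGEST
example_read_u32 (struct target_ops *aux_target, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);

  return get_target_memory_unsigned (aux_target, addr, 4, byte_order);
}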
1975
1976 static void
1977 target_info (char *args, int from_tty)
1978 {
1979 struct target_ops *t;
1980 int has_all_mem = 0;
1981
1982 if (symfile_objfile != NULL)
1983 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
1984
1985 for (t = target_stack; t != NULL; t = t->beneath)
1986 {
1987 if (!(*t->to_has_memory) (t))
1988 continue;
1989
1990 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1991 continue;
1992 if (has_all_mem)
1993 printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
1994 printf_unfiltered ("%s:\n", t->to_longname);
1995 (t->to_files_info) (t);
1996 has_all_mem = (*t->to_has_all_memory) (t);
1997 }
1998 }
1999
2000 /* This function is called before any new inferior is created, e.g.
2001 by running a program, attaching, or connecting to a target.
2002 It cleans up any state from previous invocations which might
2003 change between runs. This is a subset of what target_preopen
2004 resets (things which might change between targets). */
2005
2006 void
2007 target_pre_inferior (int from_tty)
2008 {
2009 /* Clear out solib state. Otherwise the solib state of the previous
2010 inferior might have survived and is entirely wrong for the new
2011 target. This has been observed on GNU/Linux using glibc 2.3. How
2012 to reproduce:
2013
2014 bash$ ./foo&
2015 [1] 4711
2016 bash$ ./foo&
2017 [2] 4712
2018 bash$ gdb ./foo
2019 [...]
2020 (gdb) attach 4711
2021 (gdb) detach
2022 (gdb) attach 4712
2023 Cannot access memory at address 0xdeadbeef
2024 */
2025
2026 /* In some OSs, the shared library list is the same/global/shared
2027 across inferiors. If code is shared between processes, so are
2028 memory regions and features. */
2029 if (!gdbarch_has_global_solist (target_gdbarch))
2030 {
2031 no_shared_libraries (NULL, from_tty);
2032
2033 invalidate_target_mem_regions ();
2034
2035 target_clear_description ();
2036 }
2037 }
2038
2039 /* Callback for iterate_over_inferiors. Gets rid of the given
2040 inferior. */
2041
2042 static int
2043 dispose_inferior (struct inferior *inf, void *args)
2044 {
2045 struct thread_info *thread;
2046
2047 thread = any_thread_of_process (inf->pid);
2048 if (thread)
2049 {
2050 switch_to_thread (thread->ptid);
2051
2052 /* Core inferiors actually should be detached, not killed. */
2053 if (target_has_execution)
2054 target_kill ();
2055 else
2056 target_detach (NULL, 0);
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* This is to be called by the open routine before it does
2063 anything. */
2064
2065 void
2066 target_preopen (int from_tty)
2067 {
2068 dont_repeat ();
2069
2070 if (have_inferiors ())
2071 {
2072 if (!from_tty
2073 || !have_live_inferiors ()
2074 || query (_("A program is being debugged already. Kill it? ")))
2075 iterate_over_inferiors (dispose_inferior, NULL);
2076 else
2077 error (_("Program not killed."));
2078 }
2079
2080 /* Calling target_kill may remove the target from the stack. But if
2081 it doesn't (which seems like a win for UDI), remove it now. */
2082 /* Leave the exec target, though. The user may be switching from a
2083 live process to a core of the same program. */
2084 pop_all_targets_above (file_stratum, 0);
2085
2086 target_pre_inferior (from_tty);
2087 }
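
/* Sketch of how a target's open routine is expected to use the
   function above.  "example_target" and example_open are hypothetical;
   the point is only the call order: target_preopen first, then
   connect, then push the target on the stack.  */

static struct target_ops example_target;

static void
example_open (char *args, int from_tty)
{
  target_preopen (from_tty);

  /* ... parse ARGS and connect to the example target here ... */

  push_target (&example_target);
}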
2088
2089 /* Detach a target after doing deferred register stores. */
2090
2091 void
2092 target_detach (char *args, int from_tty)
2093 {
2094 struct target_ops* t;
2095
2096 if (gdbarch_has_global_breakpoints (target_gdbarch))
2097 /* Don't remove global breakpoints here. They're removed on
2098 disconnection from the target. */
2099 ;
2100 else
2101 /* If we're in breakpoints-always-inserted mode, we have to
2102 remove them before detaching. */
2103 remove_breakpoints_pid (PIDGET (inferior_ptid));
2104
2105 for (t = current_target.beneath; t != NULL; t = t->beneath)
2106 {
2107 if (t->to_detach != NULL)
2108 {
2109 t->to_detach (t, args, from_tty);
2110 if (targetdebug)
2111 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2112 args, from_tty);
2113 return;
2114 }
2115 }
2116
2117 internal_error (__FILE__, __LINE__, "could not find a target to detach");
2118 }
2119
2120 void
2121 target_disconnect (char *args, int from_tty)
2122 {
2123 struct target_ops *t;
2124
2125 /* If we're in breakpoints-always-inserted mode or if breakpoints
2126 are global across processes, we have to remove them before
2127 disconnecting. */
2128 remove_breakpoints ();
2129
2130 for (t = current_target.beneath; t != NULL; t = t->beneath)
2131 if (t->to_disconnect != NULL)
2132 {
2133 if (targetdebug)
2134 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2135 args, from_tty);
2136 t->to_disconnect (t, args, from_tty);
2137 return;
2138 }
2139
2140 tcomplain ();
2141 }
2142
2143 ptid_t
2144 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2145 {
2146 struct target_ops *t;
2147
2148 for (t = current_target.beneath; t != NULL; t = t->beneath)
2149 {
2150 if (t->to_wait != NULL)
2151 {
2152 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2153
2154 if (targetdebug)
2155 {
2156 char *status_string;
2157
2158 status_string = target_waitstatus_to_string (status);
2159 fprintf_unfiltered (gdb_stdlog,
2160 "target_wait (%d, status) = %d, %s\n",
2161 PIDGET (ptid), PIDGET (retval),
2162 status_string);
2163 xfree (status_string);
2164 }
2165
2166 return retval;
2167 }
2168 }
2169
2170 noprocess ();
2171 }
2172
2173 char *
2174 target_pid_to_str (ptid_t ptid)
2175 {
2176 struct target_ops *t;
2177
2178 for (t = current_target.beneath; t != NULL; t = t->beneath)
2179 {
2180 if (t->to_pid_to_str != NULL)
2181 return (*t->to_pid_to_str) (t, ptid);
2182 }
2183
2184 return normal_pid_to_str (ptid);
2185 }
2186
2187 void
2188 target_resume (ptid_t ptid, int step, enum target_signal signal)
2189 {
2190 struct target_ops *t;
2191
2192 target_dcache_invalidate ();
2193
2194 for (t = current_target.beneath; t != NULL; t = t->beneath)
2195 {
2196 if (t->to_resume != NULL)
2197 {
2198 t->to_resume (t, ptid, step, signal);
2199 if (targetdebug)
2200 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2201 PIDGET (ptid),
2202 step ? "step" : "continue",
2203 target_signal_to_name (signal));
2204
2205 set_executing (ptid, 1);
2206 set_running (ptid, 1);
2207 clear_inline_frame_state (ptid);
2208 return;
2209 }
2210 }
2211
2212 noprocess ();
2213 }
2214 /* Look through the list of possible targets for a target that can
2215 follow forks. */
2216
2217 int
2218 target_follow_fork (int follow_child)
2219 {
2220 struct target_ops *t;
2221
2222 for (t = current_target.beneath; t != NULL; t = t->beneath)
2223 {
2224 if (t->to_follow_fork != NULL)
2225 {
2226 int retval = t->to_follow_fork (t, follow_child);
2227 if (targetdebug)
2228 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2229 follow_child, retval);
2230 return retval;
2231 }
2232 }
2233
2234 /* Some target returned a fork event, but did not know how to follow it. */
2235 internal_error (__FILE__, __LINE__,
2236 "could not find a target to follow fork");
2237 }
2238
2239 void
2240 target_mourn_inferior (void)
2241 {
2242 struct target_ops *t;
2243 for (t = current_target.beneath; t != NULL; t = t->beneath)
2244 {
2245 if (t->to_mourn_inferior != NULL)
2246 {
2247 t->to_mourn_inferior (t);
2248 if (targetdebug)
2249 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2250
2251 /* We no longer need to keep handles on any of the object files.
2252 Make sure to release them to avoid unnecessarily locking any
2253 of them while we're not actually debugging. */
2254 bfd_cache_close_all ();
2255
2256 return;
2257 }
2258 }
2259
2260 internal_error (__FILE__, __LINE__,
2261 "could not find a target to follow mourn inferiour");
2262 }
2263
2264 /* Look for a target which can describe architectural features, starting
2265 from TARGET. If we find one, return its description. */
2266
2267 const struct target_desc *
2268 target_read_description (struct target_ops *target)
2269 {
2270 struct target_ops *t;
2271
2272 for (t = target; t != NULL; t = t->beneath)
2273 if (t->to_read_description != NULL)
2274 {
2275 const struct target_desc *tdesc;
2276
2277 tdesc = t->to_read_description (t);
2278 if (tdesc)
2279 return tdesc;
2280 }
2281
2282 return NULL;
2283 }
2284
2285 /* The default implementation of to_search_memory.
2286 This implements a basic search of memory, reading target memory and
2287 performing the search here (as opposed to performing the search on the
2288 target side with, for example, gdbserver). */
2289
2290 int
2291 simple_search_memory (struct target_ops *ops,
2292 CORE_ADDR start_addr, ULONGEST search_space_len,
2293 const gdb_byte *pattern, ULONGEST pattern_len,
2294 CORE_ADDR *found_addrp)
2295 {
2296 /* NOTE: also defined in find.c testcase. */
2297 #define SEARCH_CHUNK_SIZE 16000
2298 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2299 /* Buffer to hold memory contents for searching. */
2300 gdb_byte *search_buf;
2301 unsigned search_buf_size;
2302 struct cleanup *old_cleanups;
2303
2304 search_buf_size = chunk_size + pattern_len - 1;
2305
2306 /* No point in trying to allocate a buffer larger than the search space. */
2307 if (search_space_len < search_buf_size)
2308 search_buf_size = search_space_len;
2309
2310 search_buf = malloc (search_buf_size);
2311 if (search_buf == NULL)
2312 error (_("Unable to allocate memory to perform the search."));
2313 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2314
2315 /* Prime the search buffer. */
2316
2317 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2318 search_buf, start_addr, search_buf_size) != search_buf_size)
2319 {
2320 warning (_("Unable to access target memory at %s, halting search."),
2321 hex_string (start_addr));
2322 do_cleanups (old_cleanups);
2323 return -1;
2324 }
2325
2326 /* Perform the search.
2327
2328 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2329 When we've scanned N bytes we copy the trailing bytes to the start and
2330 read in another N bytes. */
2331
2332 while (search_space_len >= pattern_len)
2333 {
2334 gdb_byte *found_ptr;
2335 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2336
2337 found_ptr = memmem (search_buf, nr_search_bytes,
2338 pattern, pattern_len);
2339
2340 if (found_ptr != NULL)
2341 {
2342 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2343 *found_addrp = found_addr;
2344 do_cleanups (old_cleanups);
2345 return 1;
2346 }
2347
2348 /* Not found in this chunk, skip to next chunk. */
2349
2350 /* Don't let search_space_len wrap here, it's unsigned. */
2351 if (search_space_len >= chunk_size)
2352 search_space_len -= chunk_size;
2353 else
2354 search_space_len = 0;
2355
2356 if (search_space_len >= pattern_len)
2357 {
2358 unsigned keep_len = search_buf_size - chunk_size;
2359 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2360 int nr_to_read;
2361
2362 /* Copy the trailing part of the previous iteration to the front
2363 of the buffer for the next iteration. */
2364 gdb_assert (keep_len == pattern_len - 1);
2365 memcpy (search_buf, search_buf + chunk_size, keep_len);
2366
2367 nr_to_read = min (search_space_len - keep_len, chunk_size);
2368
2369 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2370 search_buf + keep_len, read_addr,
2371 nr_to_read) != nr_to_read)
2372 {
2373 warning (_("Unable to access target memory at %s, halting search."),
2374 hex_string (read_addr));
2375 do_cleanups (old_cleanups);
2376 return -1;
2377 }
2378
2379 start_addr += chunk_size;
2380 }
2381 }
2382
2383 /* Not found. */
2384
2385 do_cleanups (old_cleanups);
2386 return 0;
2387 }
2388
2389 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2390 sequence of bytes in PATTERN with length PATTERN_LEN.
2391
2392 The result is 1 if found, 0 if not found, and -1 if there was an error
2393 requiring halting of the search (e.g. memory read error).
2394 If the pattern is found the address is recorded in FOUND_ADDRP. */
2395
2396 int
2397 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2398 const gdb_byte *pattern, ULONGEST pattern_len,
2399 CORE_ADDR *found_addrp)
2400 {
2401 struct target_ops *t;
2402 int found;
2403
2404 /* We don't use INHERIT to set current_target.to_search_memory,
2405 so we have to scan the target stack and handle targetdebug
2406 ourselves. */
2407
2408 if (targetdebug)
2409 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2410 hex_string (start_addr));
2411
2412 for (t = current_target.beneath; t != NULL; t = t->beneath)
2413 if (t->to_search_memory != NULL)
2414 break;
2415
2416 if (t != NULL)
2417 {
2418 found = t->to_search_memory (t, start_addr, search_space_len,
2419 pattern, pattern_len, found_addrp);
2420 }
2421 else
2422 {
2423 /* If a special version of to_search_memory isn't available, use the
2424 simple version. */
2425 found = simple_search_memory (current_target.beneath,
2426 start_addr, search_space_len,
2427 pattern, pattern_len, found_addrp);
2428 }
2429
2430 if (targetdebug)
2431 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2432
2433 return found;
2434 }
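
/* Hypothetical caller, for illustration only: scan a region of the
   inferior for a magic marker.  The marker bytes and the helper name
   are invented for this sketch.  */

static int
example_find_marker (CORE_ADDR region_start, ULONGEST region_len,
                     CORE_ADDR *marker_addr)
{
  static const gdb_byte marker[4] = { 0xde, 0xad, 0xbe, 0xef };

  /* Returns 1 and sets *MARKER_ADDR if found, 0 if not found,
     and -1 on a memory error.  */
  return target_search_memory (region_start, region_len,
                               marker, sizeof (marker), marker_addr);
}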
2435
2436 /* Look through the currently pushed targets. If none of them will
2437 be able to restart the currently running process, issue an error
2438 message. */
2439
2440 void
2441 target_require_runnable (void)
2442 {
2443 struct target_ops *t;
2444
2445 for (t = target_stack; t != NULL; t = t->beneath)
2446 {
2447 /* If this target knows how to create a new program, then
2448 assume we will still be able to after killing the current
2449 one. Either killing and mourning will not pop T, or else
2450 find_default_run_target will find it again. */
2451 if (t->to_create_inferior != NULL)
2452 return;
2453
2454 /* Do not worry about thread_stratum targets that can not
2455 create inferiors. Assume they will be pushed again if
2456 necessary, and continue to the process_stratum. */
2457 if (t->to_stratum == thread_stratum
2458 || t->to_stratum == arch_stratum)
2459 continue;
2460
2461 error (_("\
2462 The \"%s\" target does not support \"run\". Try \"help target\" or \"continue\"."),
2463 t->to_shortname);
2464 }
2465
2466 /* This function is only called if the target is running. In that
2467 case there should have been a process_stratum target and it
2468 should either know how to create inferiors, or not... */
2469 internal_error (__FILE__, __LINE__, "No targets found");
2470 }
2471
2472 /* Look through the list of possible targets for a target that can
2473 execute a run or attach command without any other data. This is
2474 used to locate the default process stratum.
2475
2476 If DO_MESG is not NULL, the result is always valid (error() is
2477 called for errors); else, return NULL on error. */
2478
2479 static struct target_ops *
2480 find_default_run_target (char *do_mesg)
2481 {
2482 struct target_ops **t;
2483 struct target_ops *runable = NULL;
2484 int count;
2485
2486 count = 0;
2487
2488 for (t = target_structs; t < target_structs + target_struct_size;
2489 ++t)
2490 {
2491 if ((*t)->to_can_run && target_can_run (*t))
2492 {
2493 runable = *t;
2494 ++count;
2495 }
2496 }
2497
2498 if (count != 1)
2499 {
2500 if (do_mesg)
2501 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2502 else
2503 return NULL;
2504 }
2505
2506 return runable;
2507 }
2508
2509 void
2510 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2511 {
2512 struct target_ops *t;
2513
2514 t = find_default_run_target ("attach");
2515 (t->to_attach) (t, args, from_tty);
2516 return;
2517 }
2518
2519 void
2520 find_default_create_inferior (struct target_ops *ops,
2521 char *exec_file, char *allargs, char **env,
2522 int from_tty)
2523 {
2524 struct target_ops *t;
2525
2526 t = find_default_run_target ("run");
2527 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2528 return;
2529 }
2530
2531 static int
2532 find_default_can_async_p (void)
2533 {
2534 struct target_ops *t;
2535
2536 /* This may be called before the target is pushed on the stack;
2537 look for the default process stratum. If there's none, gdb isn't
2538 configured with a native debugger, and target remote isn't
2539 connected yet. */
2540 t = find_default_run_target (NULL);
2541 if (t && t->to_can_async_p)
2542 return (t->to_can_async_p) ();
2543 return 0;
2544 }
2545
2546 static int
2547 find_default_is_async_p (void)
2548 {
2549 struct target_ops *t;
2550
2551 /* This may be called before the target is pushed on the stack;
2552 look for the default process stratum. If there's none, gdb isn't
2553 configured with a native debugger, and target remote isn't
2554 connected yet. */
2555 t = find_default_run_target (NULL);
2556 if (t && t->to_is_async_p)
2557 return (t->to_is_async_p) ();
2558 return 0;
2559 }
2560
2561 static int
2562 find_default_supports_non_stop (void)
2563 {
2564 struct target_ops *t;
2565
2566 t = find_default_run_target (NULL);
2567 if (t && t->to_supports_non_stop)
2568 return (t->to_supports_non_stop) ();
2569 return 0;
2570 }
2571
2572 int
2573 target_supports_non_stop (void)
2574 {
2575 struct target_ops *t;
2576 for (t = &current_target; t != NULL; t = t->beneath)
2577 if (t->to_supports_non_stop)
2578 return t->to_supports_non_stop ();
2579
2580 return 0;
2581 }
2582
2583
2584 char *
2585 target_get_osdata (const char *type)
2586 {
2587 char *document;
2588 struct target_ops *t;
2589
2590 /* If we're already connected to something that can get us OS
2591 related data, use it. Otherwise, try using the native
2592 target. */
2593 if (current_target.to_stratum >= process_stratum)
2594 t = current_target.beneath;
2595 else
2596 t = find_default_run_target ("get OS data");
2597
2598 if (!t)
2599 return NULL;
2600
2601 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2602 }
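
/* Sketch (hypothetical helper): fetch the "processes" OS data table
   as an XML document and free it after use.  A NULL result means no
   target could provide OS data or the transfer failed.  */

static void
example_show_process_osdata (void)
{
  char *xml = target_get_osdata ("processes");

  if (xml != NULL)
    {
      fprintf_unfiltered (gdb_stdlog, "%s\n", xml);
      xfree (xml);
    }
}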
2603
2604 /* Determine the current address space of thread PTID. */
2605
2606 struct address_space *
2607 target_thread_address_space (ptid_t ptid)
2608 {
2609 struct address_space *aspace;
2610 struct inferior *inf;
2611 struct target_ops *t;
2612
2613 for (t = current_target.beneath; t != NULL; t = t->beneath)
2614 {
2615 if (t->to_thread_address_space != NULL)
2616 {
2617 aspace = t->to_thread_address_space (t, ptid);
2618 gdb_assert (aspace);
2619
2620 if (targetdebug)
2621 fprintf_unfiltered (gdb_stdlog,
2622 "target_thread_address_space (%s) = %d\n",
2623 target_pid_to_str (ptid),
2624 address_space_num (aspace));
2625 return aspace;
2626 }
2627 }
2628
2629 /* Fall back to the "main" address space of the inferior. */
2630 inf = find_inferior_pid (ptid_get_pid (ptid));
2631
2632 if (inf == NULL || inf->aspace == NULL)
2633 internal_error (__FILE__, __LINE__, "\
2634 Can't determine the current address space of thread %s\n",
2635 target_pid_to_str (ptid));
2636
2637 return inf->aspace;
2638 }
2639
2640 static int
2641 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
2642 {
2643 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
2644 }
2645
2646 static int
2647 default_watchpoint_addr_within_range (struct target_ops *target,
2648 CORE_ADDR addr,
2649 CORE_ADDR start, int length)
2650 {
2651 return addr >= start && addr < start + length;
2652 }
2653
2654 static struct gdbarch *
2655 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2656 {
2657 return target_gdbarch;
2658 }
2659
2660 static int
2661 return_zero (void)
2662 {
2663 return 0;
2664 }
2665
2666 static int
2667 return_one (void)
2668 {
2669 return 1;
2670 }
2671
2672 static int
2673 return_minus_one (void)
2674 {
2675 return -1;
2676 }
2677
2678 /* Find a single runnable target in the stack and return it. If for
2679 some reason there is more than one, return NULL. */
2680
2681 struct target_ops *
2682 find_run_target (void)
2683 {
2684 struct target_ops **t;
2685 struct target_ops *runable = NULL;
2686 int count;
2687
2688 count = 0;
2689
2690 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2691 {
2692 if ((*t)->to_can_run && target_can_run (*t))
2693 {
2694 runable = *t;
2695 ++count;
2696 }
2697 }
2698
2699 return (count == 1 ? runable : NULL);
2700 }
2701
2702 /* Find a single core_stratum target in the list of targets and return it.
2703 If for some reason there is more than one, return NULL. */
2704
2705 struct target_ops *
2706 find_core_target (void)
2707 {
2708 struct target_ops **t;
2709 struct target_ops *runable = NULL;
2710 int count;
2711
2712 count = 0;
2713
2714 for (t = target_structs; t < target_structs + target_struct_size;
2715 ++t)
2716 {
2717 if ((*t)->to_stratum == core_stratum)
2718 {
2719 runable = *t;
2720 ++count;
2721 }
2722 }
2723
2724 return (count == 1 ? runable : NULL);
2725 }
2726
2727 /*
2728 * Find the next target down the stack from the specified target.
2729 */
2730
2731 struct target_ops *
2732 find_target_beneath (struct target_ops *t)
2733 {
2734 return t->beneath;
2735 }
2736
2737 \f
2738 /* The inferior process has died. Long live the inferior! */
2739
2740 void
2741 generic_mourn_inferior (void)
2742 {
2743 ptid_t ptid;
2744
2745 ptid = inferior_ptid;
2746 inferior_ptid = null_ptid;
2747
2748 if (!ptid_equal (ptid, null_ptid))
2749 {
2750 int pid = ptid_get_pid (ptid);
2751 exit_inferior (pid);
2752 }
2753
2754 breakpoint_init_inferior (inf_exited);
2755 registers_changed ();
2756
2757 reopen_exec_file ();
2758 reinit_frame_cache ();
2759
2760 if (deprecated_detach_hook)
2761 deprecated_detach_hook ();
2762 }
2763 \f
2764 /* Helper function for child_wait and the derivatives of child_wait.
2765 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2766 translation of that in OURSTATUS. */
2767 void
2768 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2769 {
2770 if (WIFEXITED (hoststatus))
2771 {
2772 ourstatus->kind = TARGET_WAITKIND_EXITED;
2773 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2774 }
2775 else if (!WIFSTOPPED (hoststatus))
2776 {
2777 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2778 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2779 }
2780 else
2781 {
2782 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2783 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2784 }
2785 }
2786 \f
2787 /* Convert a normal process ID to a string. Returns the string in a
2788 static buffer. */
2789
2790 char *
2791 normal_pid_to_str (ptid_t ptid)
2792 {
2793 static char buf[32];
2794
2795 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2796 return buf;
2797 }
2798
2799 static char *
2800 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
2801 {
2802 return normal_pid_to_str (ptid);
2803 }
2804
2805 /* Error-catcher for target_find_memory_regions. */
2806 static int
2807 dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
2808 {
2809 error (_("Command not implemented for this target."));
2810 return 0;
2811 }
2812
2813 /* Error-catcher for target_make_corefile_notes. */
2814 static char *
2815 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
2816 {
2817 error (_("Command not implemented for this target."));
2818 return NULL;
2819 }
2820
2821 /* Error-catcher for target_get_bookmark. */
2822 static gdb_byte *
2823 dummy_get_bookmark (char *ignore1, int ignore2)
2824 {
2825 tcomplain ();
2826 return NULL;
2827 }
2828
2829 /* Error-catcher for target_goto_bookmark. */
2830 static void
2831 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
2832 {
2833 tcomplain ();
2834 }
2835
2836 /* Set up the handful of non-empty slots needed by the dummy target
2837 vector. */
2838
2839 static void
2840 init_dummy_target (void)
2841 {
2842 dummy_target.to_shortname = "None";
2843 dummy_target.to_longname = "None";
2844 dummy_target.to_doc = "";
2845 dummy_target.to_attach = find_default_attach;
2846 dummy_target.to_detach =
2847 (void (*)(struct target_ops *, char *, int))target_ignore;
2848 dummy_target.to_create_inferior = find_default_create_inferior;
2849 dummy_target.to_can_async_p = find_default_can_async_p;
2850 dummy_target.to_is_async_p = find_default_is_async_p;
2851 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
2852 dummy_target.to_pid_to_str = dummy_pid_to_str;
2853 dummy_target.to_stratum = dummy_stratum;
2854 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
2855 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
2856 dummy_target.to_get_bookmark = dummy_get_bookmark;
2857 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
2858 dummy_target.to_xfer_partial = default_xfer_partial;
2859 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
2860 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
2861 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
2862 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
2863 dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
2864 dummy_target.to_magic = OPS_MAGIC;
2865 }
2866 \f
2867 static void
2868 debug_to_open (char *args, int from_tty)
2869 {
2870 debug_target.to_open (args, from_tty);
2871
2872 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
2873 }
2874
2875 void
2876 target_close (struct target_ops *targ, int quitting)
2877 {
2878 if (targ->to_xclose != NULL)
2879 targ->to_xclose (targ, quitting);
2880 else if (targ->to_close != NULL)
2881 targ->to_close (quitting);
2882
2883 if (targetdebug)
2884 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
2885 }
2886
2887 void
2888 target_attach (char *args, int from_tty)
2889 {
2890 struct target_ops *t;
2891 for (t = current_target.beneath; t != NULL; t = t->beneath)
2892 {
2893 if (t->to_attach != NULL)
2894 {
2895 t->to_attach (t, args, from_tty);
2896 if (targetdebug)
2897 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
2898 args, from_tty);
2899 return;
2900 }
2901 }
2902
2903 internal_error (__FILE__, __LINE__,
2904 "could not find a target to attach");
2905 }
2906
2907 int
2908 target_thread_alive (ptid_t ptid)
2909 {
2910 struct target_ops *t;
2911 for (t = current_target.beneath; t != NULL; t = t->beneath)
2912 {
2913 if (t->to_thread_alive != NULL)
2914 {
2915 int retval;
2916
2917 retval = t->to_thread_alive (t, ptid);
2918 if (targetdebug)
2919 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
2920 PIDGET (ptid), retval);
2921
2922 return retval;
2923 }
2924 }
2925
2926 return 0;
2927 }
2928
2929 void
2930 target_find_new_threads (void)
2931 {
2932 struct target_ops *t;
2933 for (t = current_target.beneath; t != NULL; t = t->beneath)
2934 {
2935 if (t->to_find_new_threads != NULL)
2936 {
2937 t->to_find_new_threads (t);
2938 if (targetdebug)
2939 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
2940
2941 return;
2942 }
2943 }
2944 }
2945
2946 static void
2947 debug_to_post_attach (int pid)
2948 {
2949 debug_target.to_post_attach (pid);
2950
2951 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
2952 }
2953
2954 /* Return a pretty printed form of target_waitstatus.
2955 Space for the result is malloc'd, caller must free. */
2956
2957 char *
2958 target_waitstatus_to_string (const struct target_waitstatus *ws)
2959 {
2960 const char *kind_str = "status->kind = ";
2961
2962 switch (ws->kind)
2963 {
2964 case TARGET_WAITKIND_EXITED:
2965 return xstrprintf ("%sexited, status = %d",
2966 kind_str, ws->value.integer);
2967 case TARGET_WAITKIND_STOPPED:
2968 return xstrprintf ("%sstopped, signal = %s",
2969 kind_str, target_signal_to_name (ws->value.sig));
2970 case TARGET_WAITKIND_SIGNALLED:
2971 return xstrprintf ("%ssignalled, signal = %s",
2972 kind_str, target_signal_to_name (ws->value.sig));
2973 case TARGET_WAITKIND_LOADED:
2974 return xstrprintf ("%sloaded", kind_str);
2975 case TARGET_WAITKIND_FORKED:
2976 return xstrprintf ("%sforked", kind_str);
2977 case TARGET_WAITKIND_VFORKED:
2978 return xstrprintf ("%svforked", kind_str);
2979 case TARGET_WAITKIND_EXECD:
2980 return xstrprintf ("%sexecd", kind_str);
2981 case TARGET_WAITKIND_SYSCALL_ENTRY:
2982 return xstrprintf ("%sentered syscall", kind_str);
2983 case TARGET_WAITKIND_SYSCALL_RETURN:
2984 return xstrprintf ("%sexited syscall", kind_str);
2985 case TARGET_WAITKIND_SPURIOUS:
2986 return xstrprintf ("%sspurious", kind_str);
2987 case TARGET_WAITKIND_IGNORE:
2988 return xstrprintf ("%signore", kind_str);
2989 case TARGET_WAITKIND_NO_HISTORY:
2990 return xstrprintf ("%sno-history", kind_str);
2991 default:
2992 return xstrprintf ("%sunknown???", kind_str);
2993 }
2994 }
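
/* Hypothetical helper, shown only to tie the two routines above
   together: translate a raw status from wait/waitpid with
   store_waitstatus and log the pretty-printed form.  */

static void
example_log_wait_status (int hoststatus)
{
  struct target_waitstatus ws;
  char *desc;

  store_waitstatus (&ws, hoststatus);
  desc = target_waitstatus_to_string (&ws);
  fprintf_unfiltered (gdb_stdlog, "%s\n", desc);
  xfree (desc);
}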
2995
2996 static void
2997 debug_print_register (const char * func,
2998 struct regcache *regcache, int regno)
2999 {
3000 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3001 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3002 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3003 && gdbarch_register_name (gdbarch, regno) != NULL
3004 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3005 fprintf_unfiltered (gdb_stdlog, "(%s)",
3006 gdbarch_register_name (gdbarch, regno));
3007 else
3008 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3009 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3010 {
3011 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3012 int i, size = register_size (gdbarch, regno);
3013 unsigned char buf[MAX_REGISTER_SIZE];
3014 regcache_raw_collect (regcache, regno, buf);
3015 fprintf_unfiltered (gdb_stdlog, " = ");
3016 for (i = 0; i < size; i++)
3017 {
3018 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3019 }
3020 if (size <= sizeof (LONGEST))
3021 {
3022 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3023 fprintf_unfiltered (gdb_stdlog, " %s %s",
3024 core_addr_to_string_nz (val), plongest (val));
3025 }
3026 }
3027 fprintf_unfiltered (gdb_stdlog, "\n");
3028 }
3029
3030 void
3031 target_fetch_registers (struct regcache *regcache, int regno)
3032 {
3033 struct target_ops *t;
3034 for (t = current_target.beneath; t != NULL; t = t->beneath)
3035 {
3036 if (t->to_fetch_registers != NULL)
3037 {
3038 t->to_fetch_registers (t, regcache, regno);
3039 if (targetdebug)
3040 debug_print_register ("target_fetch_registers", regcache, regno);
3041 return;
3042 }
3043 }
3044 }
3045
3046 void
3047 target_store_registers (struct regcache *regcache, int regno)
3048 {
3049 struct target_ops *t;
3050
3051 for (t = current_target.beneath; t != NULL; t = t->beneath)
3052 {
3053 if (t->to_store_registers != NULL)
3054 {
3055 t->to_store_registers (t, regcache, regno);
3056 if (targetdebug)
3057 {
3058 debug_print_register ("target_store_registers", regcache, regno);
3059 }
3060 return;
3061 }
3062 }
3063
3064 noprocess ();
3065 }
3066
3067 int
3068 target_core_of_thread (ptid_t ptid)
3069 {
3070 struct target_ops *t;
3071
3072 for (t = current_target.beneath; t != NULL; t = t->beneath)
3073 {
3074 if (t->to_core_of_thread != NULL)
3075 {
3076 int retval = t->to_core_of_thread (t, ptid);
3077 if (targetdebug)
3078 fprintf_unfiltered (gdb_stdlog, "target_core_of_thread (%d) = %d\n",
3079 PIDGET (ptid), retval);
3080 return retval;
3081 }
3082 }
3083
3084 return -1;
3085 }
3086
3087 static void
3088 debug_to_prepare_to_store (struct regcache *regcache)
3089 {
3090 debug_target.to_prepare_to_store (regcache);
3091
3092 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
3093 }
3094
3095 static int
3096 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
3097 int write, struct mem_attrib *attrib,
3098 struct target_ops *target)
3099 {
3100 int retval;
3101
3102 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
3103 attrib, target);
3104
3105 fprintf_unfiltered (gdb_stdlog,
3106 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
3107 paddress (target_gdbarch, memaddr), len,
3108 write ? "write" : "read", retval);
3109
3110 if (retval > 0)
3111 {
3112 int i;
3113
3114 fputs_unfiltered (", bytes =", gdb_stdlog);
3115 for (i = 0; i < retval; i++)
3116 {
3117 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
3118 {
3119 if (targetdebug < 2 && i > 0)
3120 {
3121 fprintf_unfiltered (gdb_stdlog, " ...");
3122 break;
3123 }
3124 fprintf_unfiltered (gdb_stdlog, "\n");
3125 }
3126
3127 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
3128 }
3129 }
3130
3131 fputc_unfiltered ('\n', gdb_stdlog);
3132
3133 return retval;
3134 }
3135
3136 static void
3137 debug_to_files_info (struct target_ops *target)
3138 {
3139 debug_target.to_files_info (target);
3140
3141 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3142 }
3143
3144 static int
3145 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3146 struct bp_target_info *bp_tgt)
3147 {
3148 int retval;
3149
3150 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3151
3152 fprintf_unfiltered (gdb_stdlog,
3153 "target_insert_breakpoint (0x%lx, xxx) = %ld\n",
3154 (unsigned long) bp_tgt->placed_address,
3155 (unsigned long) retval);
3156 return retval;
3157 }
3158
3159 static int
3160 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3161 struct bp_target_info *bp_tgt)
3162 {
3163 int retval;
3164
3165 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3166
3167 fprintf_unfiltered (gdb_stdlog,
3168 "target_remove_breakpoint (0x%lx, xxx) = %ld\n",
3169 (unsigned long) bp_tgt->placed_address,
3170 (unsigned long) retval);
3171 return retval;
3172 }
3173
3174 static int
3175 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3176 {
3177 int retval;
3178
3179 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3180
3181 fprintf_unfiltered (gdb_stdlog,
3182 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3183 (unsigned long) type,
3184 (unsigned long) cnt,
3185 (unsigned long) from_tty,
3186 (unsigned long) retval);
3187 return retval;
3188 }
3189
3190 static int
3191 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3192 {
3193 CORE_ADDR retval;
3194
3195 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3196
3197 fprintf_unfiltered (gdb_stdlog,
3198 "target_region_ok_for_hw_watchpoint (%ld, %ld) = 0x%lx\n",
3199 (unsigned long) addr,
3200 (unsigned long) len,
3201 (unsigned long) retval);
3202 return retval;
3203 }
3204
3205 static int
3206 debug_to_stopped_by_watchpoint (void)
3207 {
3208 int retval;
3209
3210 retval = debug_target.to_stopped_by_watchpoint ();
3211
3212 fprintf_unfiltered (gdb_stdlog,
3213 "target_stopped_by_watchpoint () = %ld\n",
3214 (unsigned long) retval);
3215 return retval;
3216 }
3217
3218 static int
3219 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3220 {
3221 int retval;
3222
3223 retval = debug_target.to_stopped_data_address (target, addr);
3224
3225 fprintf_unfiltered (gdb_stdlog,
3226 "target_stopped_data_address ([0x%lx]) = %ld\n",
3227 (unsigned long)*addr,
3228 (unsigned long)retval);
3229 return retval;
3230 }
3231
3232 static int
3233 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3234 CORE_ADDR addr,
3235 CORE_ADDR start, int length)
3236 {
3237 int retval;
3238
3239 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3240 start, length);
3241
3242 fprintf_unfiltered (gdb_stdlog,
3243 "target_watchpoint_addr_within_range (0x%lx, 0x%lx, %d) = %d\n",
3244 (unsigned long) addr, (unsigned long) start, length,
3245 retval);
3246 return retval;
3247 }
3248
3249 static int
3250 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3251 struct bp_target_info *bp_tgt)
3252 {
3253 int retval;
3254
3255 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3256
3257 fprintf_unfiltered (gdb_stdlog,
3258 "target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
3259 (unsigned long) bp_tgt->placed_address,
3260 (unsigned long) retval);
3261 return retval;
3262 }
3263
3264 static int
3265 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3266 struct bp_target_info *bp_tgt)
3267 {
3268 int retval;
3269
3270 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3271
3272 fprintf_unfiltered (gdb_stdlog,
3273 "target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
3274 (unsigned long) bp_tgt->placed_address,
3275 (unsigned long) retval);
3276 return retval;
3277 }
3278
3279 static int
3280 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type)
3281 {
3282 int retval;
3283
3284 retval = debug_target.to_insert_watchpoint (addr, len, type);
3285
3286 fprintf_unfiltered (gdb_stdlog,
3287 "target_insert_watchpoint (0x%lx, %d, %d) = %ld\n",
3288 (unsigned long) addr, len, type, (unsigned long) retval);
3289 return retval;
3290 }
3291
3292 static int
3293 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type)
3294 {
3295 int retval;
3296
3297 retval = debug_target.to_remove_watchpoint (addr, len, type);
3298
3299 fprintf_unfiltered (gdb_stdlog,
3300 "target_remove_watchpoint (0x%lx, %d, %d) = %ld\n",
3301 (unsigned long) addr, len, type, (unsigned long) retval);
3302 return retval;
3303 }
3304
3305 static void
3306 debug_to_terminal_init (void)
3307 {
3308 debug_target.to_terminal_init ();
3309
3310 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
3311 }
3312
3313 static void
3314 debug_to_terminal_inferior (void)
3315 {
3316 debug_target.to_terminal_inferior ();
3317
3318 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
3319 }
3320
3321 static void
3322 debug_to_terminal_ours_for_output (void)
3323 {
3324 debug_target.to_terminal_ours_for_output ();
3325
3326 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
3327 }
3328
3329 static void
3330 debug_to_terminal_ours (void)
3331 {
3332 debug_target.to_terminal_ours ();
3333
3334 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
3335 }
3336
3337 static void
3338 debug_to_terminal_save_ours (void)
3339 {
3340 debug_target.to_terminal_save_ours ();
3341
3342 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
3343 }
3344
3345 static void
3346 debug_to_terminal_info (char *arg, int from_tty)
3347 {
3348 debug_target.to_terminal_info (arg, from_tty);
3349
3350 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
3351 from_tty);
3352 }
3353
3354 static void
3355 debug_to_load (char *args, int from_tty)
3356 {
3357 debug_target.to_load (args, from_tty);
3358
3359 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
3360 }
3361
3362 static int
3363 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
3364 {
3365 int retval;
3366
3367 retval = debug_target.to_lookup_symbol (name, addrp);
3368
3369 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
3370
3371 return retval;
3372 }
3373
3374 static void
3375 debug_to_post_startup_inferior (ptid_t ptid)
3376 {
3377 debug_target.to_post_startup_inferior (ptid);
3378
3379 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
3380 PIDGET (ptid));
3381 }
3382
3383 static void
3384 debug_to_acknowledge_created_inferior (int pid)
3385 {
3386 debug_target.to_acknowledge_created_inferior (pid);
3387
3388 fprintf_unfiltered (gdb_stdlog, "target_acknowledge_created_inferior (%d)\n",
3389 pid);
3390 }
3391
3392 static void
3393 debug_to_insert_fork_catchpoint (int pid)
3394 {
3395 debug_target.to_insert_fork_catchpoint (pid);
3396
3397 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
3398 pid);
3399 }
3400
3401 static int
3402 debug_to_remove_fork_catchpoint (int pid)
3403 {
3404 int retval;
3405
3406 retval = debug_target.to_remove_fork_catchpoint (pid);
3407
3408 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3409 pid, retval);
3410
3411 return retval;
3412 }
3413
3414 static void
3415 debug_to_insert_vfork_catchpoint (int pid)
3416 {
3417 debug_target.to_insert_vfork_catchpoint (pid);
3418
3419 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
3420 pid);
3421 }
3422
3423 static int
3424 debug_to_remove_vfork_catchpoint (int pid)
3425 {
3426 int retval;
3427
3428 retval = debug_target.to_remove_vfork_catchpoint (pid);
3429
3430 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3431 pid, retval);
3432
3433 return retval;
3434 }
3435
3436 static void
3437 debug_to_insert_exec_catchpoint (int pid)
3438 {
3439 debug_target.to_insert_exec_catchpoint (pid);
3440
3441 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
3442 pid);
3443 }
3444
3445 static int
3446 debug_to_remove_exec_catchpoint (int pid)
3447 {
3448 int retval;
3449
3450 retval = debug_target.to_remove_exec_catchpoint (pid);
3451
3452 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3453 pid, retval);
3454
3455 return retval;
3456 }
3457
3458 static int
3459 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3460 {
3461 int has_exited;
3462
3463 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3464
3465 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3466 pid, wait_status, *exit_status, has_exited);
3467
3468 return has_exited;
3469 }
3470
3471 static int
3472 debug_to_can_run (void)
3473 {
3474 int retval;
3475
3476 retval = debug_target.to_can_run ();
3477
3478 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3479
3480 return retval;
3481 }
3482
3483 static void
3484 debug_to_notice_signals (ptid_t ptid)
3485 {
3486 debug_target.to_notice_signals (ptid);
3487
3488 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3489 PIDGET (ptid));
3490 }
3491
3492 static struct gdbarch *
3493 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3494 {
3495 struct gdbarch *retval;
3496
3497 retval = debug_target.to_thread_architecture (ops, ptid);
3498
3499 fprintf_unfiltered (gdb_stdlog, "target_thread_architecture (%s) = %s [%s]\n",
3500 target_pid_to_str (ptid), host_address_to_string (retval),
3501 gdbarch_bfd_arch_info (retval)->printable_name);
3502 return retval;
3503 }
3504
3505 static void
3506 debug_to_stop (ptid_t ptid)
3507 {
3508 debug_target.to_stop (ptid);
3509
3510 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3511 target_pid_to_str (ptid));
3512 }
3513
3514 static void
3515 debug_to_rcmd (char *command,
3516 struct ui_file *outbuf)
3517 {
3518 debug_target.to_rcmd (command, outbuf);
3519 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3520 }
3521
3522 static char *
3523 debug_to_pid_to_exec_file (int pid)
3524 {
3525 char *exec_file;
3526
3527 exec_file = debug_target.to_pid_to_exec_file (pid);
3528
3529 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3530 pid, exec_file);
3531
3532 return exec_file;
3533 }
3534
3535 static void
3536 setup_target_debug (void)
3537 {
3538 memcpy (&debug_target, &current_target, sizeof debug_target);
3539
3540 current_target.to_open = debug_to_open;
3541 current_target.to_post_attach = debug_to_post_attach;
3542 current_target.to_prepare_to_store = debug_to_prepare_to_store;
3543 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
3544 current_target.to_files_info = debug_to_files_info;
3545 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
3546 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
3547 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
3548 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
3549 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
3550 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
3551 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
3552 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
3553 current_target.to_stopped_data_address = debug_to_stopped_data_address;
3554 current_target.to_watchpoint_addr_within_range = debug_to_watchpoint_addr_within_range;
3555 current_target.to_region_ok_for_hw_watchpoint = debug_to_region_ok_for_hw_watchpoint;
3556 current_target.to_terminal_init = debug_to_terminal_init;
3557 current_target.to_terminal_inferior = debug_to_terminal_inferior;
3558 current_target.to_terminal_ours_for_output = debug_to_terminal_ours_for_output;
3559 current_target.to_terminal_ours = debug_to_terminal_ours;
3560 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
3561 current_target.to_terminal_info = debug_to_terminal_info;
3562 current_target.to_load = debug_to_load;
3563 current_target.to_lookup_symbol = debug_to_lookup_symbol;
3564 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
3565 current_target.to_acknowledge_created_inferior = debug_to_acknowledge_created_inferior;
3566 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
3567 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
3568 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
3569 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
3570 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
3571 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
3572 current_target.to_has_exited = debug_to_has_exited;
3573 current_target.to_can_run = debug_to_can_run;
3574 current_target.to_notice_signals = debug_to_notice_signals;
3575 current_target.to_stop = debug_to_stop;
3576 current_target.to_rcmd = debug_to_rcmd;
3577 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
3578 current_target.to_thread_architecture = debug_to_thread_architecture;
3579 }
3580 \f
3581
3582 static char targ_desc[] =
3583 "Names of targets and files being debugged.\n\
3584 Shows the entire stack of targets currently in use (including the exec-file,\n\
3585 core-file, and process, if any), as well as the symbol file name.";
3586
3587 static void
3588 do_monitor_command (char *cmd,
3589 int from_tty)
3590 {
3591 if ((current_target.to_rcmd
3592 == (void (*) (char *, struct ui_file *)) tcomplain)
3593 || (current_target.to_rcmd == debug_to_rcmd
3594 && (debug_target.to_rcmd
3595 == (void (*) (char *, struct ui_file *)) tcomplain)))
3596 error (_("\"monitor\" command not supported by this target."));
3597 target_rcmd (cmd, gdb_stdtarg);
3598 }
3599
3600 /* Print the name of each layer of our target stack. */
3601
3602 static void
3603 maintenance_print_target_stack (char *cmd, int from_tty)
3604 {
3605 struct target_ops *t;
3606
3607 printf_filtered (_("The current target stack is:\n"));
3608
3609 for (t = target_stack; t != NULL; t = t->beneath)
3610 {
3611 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3612 }
3613 }
3614
3615 /* Controls if async mode is permitted. */
3616 int target_async_permitted = 0;
3617
3618 /* The set command writes to this variable. If the inferior is
3619 executing, target_async_permitted is *not* updated. */
3620 static int target_async_permitted_1 = 0;
3621
3622 static void
3623 set_maintenance_target_async_permitted (char *args, int from_tty,
3624 struct cmd_list_element *c)
3625 {
3626 if (have_live_inferiors ())
3627 {
3628 target_async_permitted_1 = target_async_permitted;
3629 error (_("Cannot change this setting while the inferior is running."));
3630 }
3631
3632 target_async_permitted = target_async_permitted_1;
3633 }
3634
3635 static void
3636 show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
3637 struct cmd_list_element *c,
3638 const char *value)
3639 {
3640 fprintf_filtered (file, _("\
3641 Controlling the inferior in asynchronous mode is %s.\n"), value);
3642 }
3643
3644 void
3645 initialize_targets (void)
3646 {
3647 init_dummy_target ();
3648 push_target (&dummy_target);
3649
3650 add_info ("target", target_info, targ_desc);
3651 add_info ("files", target_info, targ_desc);
3652
3653 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3654 Set target debugging."), _("\
3655 Show target debugging."), _("\
3656 When non-zero, target debugging is enabled. Higher numbers are more\n\
3657 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
3658 command."),
3659 NULL,
3660 show_targetdebug,
3661 &setdebuglist, &showdebuglist);
3662
3663 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3664 &trust_readonly, _("\
3665 Set mode for reading from readonly sections."), _("\
3666 Show mode for reading from readonly sections."), _("\
3667 When this mode is on, memory reads from readonly sections (such as .text)\n\
3668 will be read from the object file instead of from the target. This will\n\
3669 result in significant performance improvement for remote targets."),
3670 NULL,
3671 show_trust_readonly,
3672 &setlist, &showlist);
3673
3674 add_com ("monitor", class_obscure, do_monitor_command,
3675 _("Send a command to the remote monitor (remote targets only)."));
3676
3677 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3678 _("Print the name of each layer of the internal target stack."),
3679 &maintenanceprintlist);
3680
3681 add_setshow_boolean_cmd ("target-async", no_class,
3682 &target_async_permitted_1, _("\
3683 Set whether gdb controls the inferior in asynchronous mode."), _("\
3684 Show whether gdb controls the inferior in asynchronous mode."), _("\
3685 Tells gdb whether to control the inferior in asynchronous mode."),
3686 set_maintenance_target_async_permitted,
3687 show_maintenance_target_async_permitted,
3688 &setlist,
3689 &showlist);
3690
3691 add_setshow_boolean_cmd ("stack-cache", class_support,
3692 &stack_cache_enabled_p_1, _("\
3693 Set cache use for stack access."), _("\
3694 Show cache use for stack access."), _("\
3695 When on, use the data cache for all stack access, regardless of any\n\
3696 configured memory regions. This improves remote performance significantly.\n\
3697 By default, caching for stack access is on."),
3698 set_stack_cache_enabled_p,
3699 show_stack_cache_enabled_p,
3700 &setlist, &showlist);
3701
3702 target_dcache = dcache_init ();
3703 }