2011-01-05 Michael Snyder <msnyder@vmware.com>
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46 #include "tracepoint.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static int nosymbol (char *, CORE_ADDR *);
58
59 static void tcomplain (void) ATTRIBUTE_NORETURN;
60
61 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
62
63 static int return_zero (void);
64
65 static int return_one (void);
66
67 static int return_minus_one (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static LONGEST default_xfer_partial (struct target_ops *ops,
76 enum target_object object,
77 const char *annex, gdb_byte *readbuf,
78 const gdb_byte *writebuf,
79 ULONGEST offset, LONGEST len);
80
81 static LONGEST current_xfer_partial (struct target_ops *ops,
82 enum target_object object,
83 const char *annex, gdb_byte *readbuf,
84 const gdb_byte *writebuf,
85 ULONGEST offset, LONGEST len);
86
87 static LONGEST target_xfer_partial (struct target_ops *ops,
88 enum target_object object,
89 const char *annex,
90 void *readbuf, const void *writebuf,
91 ULONGEST offset, LONGEST len);
92
93 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
94 ptid_t ptid);
95
96 static void init_dummy_target (void);
97
98 static struct target_ops debug_target;
99
100 static void debug_to_open (char *, int);
101
102 static void debug_to_prepare_to_store (struct regcache *);
103
104 static void debug_to_files_info (struct target_ops *);
105
106 static int debug_to_insert_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_remove_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_can_use_hw_breakpoint (int, int, int);
113
114 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
124 struct expression *);
125
126 static int debug_to_stopped_by_watchpoint (void);
127
128 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
129
130 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
131 CORE_ADDR, CORE_ADDR, int);
132
133 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
134
135 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
136 struct expression *);
137
138 static void debug_to_terminal_init (void);
139
140 static void debug_to_terminal_inferior (void);
141
142 static void debug_to_terminal_ours_for_output (void);
143
144 static void debug_to_terminal_save_ours (void);
145
146 static void debug_to_terminal_ours (void);
147
148 static void debug_to_terminal_info (char *, int);
149
150 static void debug_to_load (char *, int);
151
152 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
153
154 static int debug_to_can_run (void);
155
156 static void debug_to_notice_signals (ptid_t);
157
158 static void debug_to_stop (ptid_t);
159
/* NOTE: cagney/2004-09-29: Many targets reference this variable in
   weird and mysterious ways.  Putting the variable here lets those
   weird and mysterious ways keep building while they are being
   converted to the inferior inheritance structure.  */
164 struct target_ops deprecated_child_ops;
165
166 /* Pointer to array of target architecture structures; the size of the
167 array; the current index into the array; the allocated size of the
168 array. */
169 struct target_ops **target_structs;
170 unsigned target_struct_size;
171 unsigned target_struct_index;
172 unsigned target_struct_allocsize;
173 #define DEFAULT_ALLOCSIZE 10
174
175 /* The initial current target, so that there is always a semi-valid
176 current target. */
177
178 static struct target_ops dummy_target;
179
180 /* Top of target stack. */
181
182 static struct target_ops *target_stack;
183
184 /* The target structure we are currently using to talk to a process
185 or file or whatever "inferior" we have. */
186
187 struct target_ops current_target;
188
189 /* Command list for target. */
190
191 static struct cmd_list_element *targetlist = NULL;
192
193 /* Nonzero if we should trust readonly sections from the
194 executable when reading memory. */
195
196 static int trust_readonly = 0;
197
198 /* Nonzero if we should show true memory content including
199 memory breakpoint inserted by gdb. */
200
201 static int show_memory_breakpoints = 0;
202
203 /* These globals control whether GDB attempts to perform these
204 operations; they are useful for targets that need to prevent
205 inadvertant disruption, such as in non-stop mode. */
206
207 int may_write_registers = 1;
208
209 int may_write_memory = 1;
210
211 int may_insert_breakpoints = 1;
212
213 int may_insert_tracepoints = 1;
214
215 int may_insert_fast_tracepoints = 1;
216
217 int may_stop = 1;
218
219 /* Non-zero if we want to see trace of target level stuff. */
220
221 static int targetdebug = 0;
/* Implementation of "show debug target": report the current value of
   the targetdebug setting.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
228
229 static void setup_target_debug (void);
230
231 /* The option sets this. */
232 static int stack_cache_enabled_p_1 = 1;
233 /* And set_stack_cache_enabled_p updates this.
234 The reason for the separation is so that we don't flush the cache for
235 on->on transitions. */
236 static int stack_cache_enabled_p = 1;
237
238 /* This is called *after* the stack-cache has been set.
239 Flush the cache for off->on and on->off transitions.
240 There's no real need to flush the cache for on->off transitions,
241 except cleanliness. */
242
243 static void
244 set_stack_cache_enabled_p (char *args, int from_tty,
245 struct cmd_list_element *c)
246 {
247 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
248 target_dcache_invalidate ();
249
250 stack_cache_enabled_p = stack_cache_enabled_p_1;
251 }
252
/* Implementation of "show stack-cache".  */
static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
259
260 /* Cache of memory operations, to speed up remote access. */
261 static DCACHE *target_dcache;
262
/* Invalidate the target dcache, discarding all cached memory
   contents.  Must be called whenever target memory may have changed
   behind the cache's back (e.g. after a load or resume).  */

void
target_dcache_invalidate (void)
{
  dcache_invalidate (target_dcache);
}
270
/* The user just typed 'target' without the name of a target.  Print a
   usage hint rather than treating it as an error.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
279
280 /* Default target_has_* methods for process_stratum targets. */
281
282 int
283 default_child_has_all_memory (struct target_ops *ops)
284 {
285 /* If no inferior selected, then we can't read memory here. */
286 if (ptid_equal (inferior_ptid, null_ptid))
287 return 0;
288
289 return 1;
290 }
291
292 int
293 default_child_has_memory (struct target_ops *ops)
294 {
295 /* If no inferior selected, then we can't read memory here. */
296 if (ptid_equal (inferior_ptid, null_ptid))
297 return 0;
298
299 return 1;
300 }
301
302 int
303 default_child_has_stack (struct target_ops *ops)
304 {
305 /* If no inferior selected, there's no stack. */
306 if (ptid_equal (inferior_ptid, null_ptid))
307 return 0;
308
309 return 1;
310 }
311
312 int
313 default_child_has_registers (struct target_ops *ops)
314 {
315 /* Can't read registers from no inferior. */
316 if (ptid_equal (inferior_ptid, null_ptid))
317 return 0;
318
319 return 1;
320 }
321
322 int
323 default_child_has_execution (struct target_ops *ops)
324 {
325 /* If there's no thread selected, then we can't make it run through
326 hoops. */
327 if (ptid_equal (inferior_ptid, null_ptid))
328 return 0;
329
330 return 1;
331 }
332
333
334 int
335 target_has_all_memory_1 (void)
336 {
337 struct target_ops *t;
338
339 for (t = current_target.beneath; t != NULL; t = t->beneath)
340 if (t->to_has_all_memory (t))
341 return 1;
342
343 return 0;
344 }
345
346 int
347 target_has_memory_1 (void)
348 {
349 struct target_ops *t;
350
351 for (t = current_target.beneath; t != NULL; t = t->beneath)
352 if (t->to_has_memory (t))
353 return 1;
354
355 return 0;
356 }
357
358 int
359 target_has_stack_1 (void)
360 {
361 struct target_ops *t;
362
363 for (t = current_target.beneath; t != NULL; t = t->beneath)
364 if (t->to_has_stack (t))
365 return 1;
366
367 return 0;
368 }
369
370 int
371 target_has_registers_1 (void)
372 {
373 struct target_ops *t;
374
375 for (t = current_target.beneath; t != NULL; t = t->beneath)
376 if (t->to_has_registers (t))
377 return 1;
378
379 return 0;
380 }
381
382 int
383 target_has_execution_1 (void)
384 {
385 struct target_ops *t;
386
387 for (t = current_target.beneath; t != NULL; t = t->beneath)
388 if (t->to_has_execution (t))
389 return 1;
390
391 return 0;
392 }
393
394 /* Add a possible target architecture to the list. */
395
/* Register target vector T as an available target type.  Fills in
   default implementations for the "must have" methods, records T in
   the global target_structs table (growing it as needed), and creates
   the corresponding "target <shortname>" subcommand (registering the
   "target" prefix command on first use).  */
void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *)) return_zero;

  /* Lazily allocate the registry, doubling it when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first add_target call also creates the "target" prefix
     command itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
443
444 /* Stub functions */
445
void
target_ignore (void)
{
  /* Deliberately empty: the default for target methods that need take
     no action.  */
}
450
451 void
452 target_kill (void)
453 {
454 struct target_ops *t;
455
456 for (t = current_target.beneath; t != NULL; t = t->beneath)
457 if (t->to_kill != NULL)
458 {
459 if (targetdebug)
460 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
461
462 t->to_kill (t);
463 return;
464 }
465
466 noprocess ();
467 }
468
/* Load a program into the target.  Invalidate the memory cache first,
   since the load is about to rewrite target memory, then delegate to
   the current target's to_load method.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
475
476 void
477 target_create_inferior (char *exec_file, char *args,
478 char **env, int from_tty)
479 {
480 struct target_ops *t;
481
482 for (t = current_target.beneath; t != NULL; t = t->beneath)
483 {
484 if (t->to_create_inferior != NULL)
485 {
486 t->to_create_inferior (t, exec_file, args, env, from_tty);
487 if (targetdebug)
488 fprintf_unfiltered (gdb_stdlog,
489 "target_create_inferior (%s, %s, xxx, %d)\n",
490 exec_file, args, from_tty);
491 return;
492 }
493 }
494
495 internal_error (__FILE__, __LINE__,
496 "could not find a target to create inferior");
497 }
498
/* Give the inferior control of the terminal, unless GDB is resuming
   it in the background, in which case GDB keeps the terminal.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
513
/* Default deprecated_xfer_memory stub: refuse every transfer by
   setting errno and reporting zero bytes handled.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
521
/* Error out because the current target does not support the requested
   operation.  Used (through casts) as the default for many target
   methods.  Does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
528
/* Error out because the requested operation needs a live process and
   there is none.  Does not return.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
534
/* Default to_lookup_symbol stub: report NAME as not found (nonzero
   return means the symbol does not exist in the target
   environment).  */
static int
nosymbol (char *name, CORE_ADDR *addrp)
{
  return 1;			/* Symbol does not exist in target env.  */
}
540
/* Default to_terminal_info method: nothing has been saved, so say
   so.  */
static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
546
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
558
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
562 vectors.
563
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
569 request. */
570
/* Rebuild current_target as the "squash" of the target stack: zero it,
   copy each inheritable method from the topmost target that provides
   it, then fill any remaining holes with defaults.  The INHERIT and
   de_fault tables below are order-sensitive; keep them in sync with
   struct target_ops.  */
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET unless a target higher on the stack has
     already supplied it — the topmost definition wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      INHERIT (to_lookup_symbol, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      INHERIT (to_notice_signals, t);
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_async_mask, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_magic, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_lookup_symbol,
	    (int (*) (char *, CORE_ADDR *))
	    nosymbol);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (void (*) (int))
	    tcomplain);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_insert_vfork_catchpoint,
	    (void (*) (int))
	    tcomplain);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_insert_exec_catchpoint,
	    (void (*) (int))
	    tcomplain);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    tcomplain);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_notice_signals,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is never inherited; it always dispatches down the
     stack itself.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_async_mask,
	    (int (*) (int))
	    return_one);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct breakpoint *))
	    tcomplain);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
905
906 /* Push a new target type into the stack of the existing target accessors,
907 possibly superseding some of the existing accessors.
908
909 Rather than allow an empty stack, we always have the dummy target at
910 the bottom stratum, so we can call the function vectors without
911 checking them. */
912
void
push_target (struct target_ops *t)
{
  /* CUR walks the chain of `beneath' links by address, so that
     unlinking/relinking can be done through *CUR in place.  */
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the stack is
     kept sorted with higher strata on top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
956
957 /* Remove a target_ops vector from the stack, wherever it may be.
958 Return how many times it was removed (0 or 1). */
959
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack and may
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    "Attempt to unpush the dummy target");

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  if ((*cur) == NULL)
    return 0;			/* Didn't find target_ops, quit now.  */

  /* NOTE: cagney/2003-12-06: In '94 the close call was made
     unconditional by moving it to before the above check that the
     target was in the target stack (something about "Change the way
     pushing and popping of targets work to support target overlays
     and inheritance").  This doesn't make much sense - only open
     targets should be closed.  */
  target_close (t, 0);

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  return 1;
}
999
1000 void
1001 pop_target (void)
1002 {
1003 target_close (target_stack, 0); /* Let it clean up */
1004 if (unpush_target (target_stack) == 1)
1005 return;
1006
1007 fprintf_unfiltered (gdb_stderr,
1008 "pop_target couldn't find target %s\n",
1009 current_target.to_shortname);
1010 internal_error (__FILE__, __LINE__,
1011 _("failed internal consistency check"));
1012 }
1013
/* Pop and close every target whose stratum is strictly above
   ABOVE_STRATUM.  QUITTING is forwarded to target_close.  */
void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* NOTE(review): unpush_target below also calls target_close
	 (with quitting == 0), so each popped target appears to be
	 closed twice, and the second close ignores QUITTING — confirm
	 whether this double close is intended.  */
      target_close (target_stack, quitting);
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1031
/* Pop every target down to (but not including) the dummy target.  */
void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}
1037
1038 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1039
1040 int
1041 target_is_pushed (struct target_ops *t)
1042 {
1043 struct target_ops **cur;
1044
1045 /* Check magic number. If wrong, it probably means someone changed
1046 the struct definition, but not all the places that initialize one. */
1047 if (t->to_magic != OPS_MAGIC)
1048 {
1049 fprintf_unfiltered (gdb_stderr,
1050 "Magic number of %s target struct wrong\n",
1051 t->to_shortname);
1052 internal_error (__FILE__, __LINE__,
1053 _("failed internal consistency check"));
1054 }
1055
1056 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1057 if (*cur == t)
1058 return 1;
1059
1060 return 0;
1061 }
1062
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  On
   failure this throws an error whose text distinguishes whether
   OBJFILE is a shared library or the main executable.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: ADDR must survive the longjmp taken by TRY_CATCH on
     error, so it cannot live in a register across the setjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target layer (starting beneath the flattened
     current_target) that knows how to resolve TLS addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* The wording below depends on whether OBJFILE is a library
	     or the executable itself.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; let a higher catcher deal
		 with it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1159
1160 #undef MIN
1161 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1162
1163 /* target_read_string -- read a null terminated string, up to LEN bytes,
1164 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1165 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1166 is responsible for freeing it. Return the number of bytes successfully
1167 read. */
1168
1169 int
1170 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1171 {
1172 int tlen, origlen, offset, i;
1173 gdb_byte buf[4];
1174 int errcode = 0;
1175 char *buffer;
1176 int buffer_allocated;
1177 char *bufptr;
1178 unsigned int nbytes_read = 0;
1179
1180 gdb_assert (string);
1181
1182 /* Small for testing. */
1183 buffer_allocated = 4;
1184 buffer = xmalloc (buffer_allocated);
1185 bufptr = buffer;
1186
1187 origlen = len;
1188
1189 while (len > 0)
1190 {
1191 tlen = MIN (len, 4 - (memaddr & 3));
1192 offset = memaddr & 3;
1193
1194 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1195 if (errcode != 0)
1196 {
1197 /* The transfer request might have crossed the boundary to an
1198 unallocated region of memory. Retry the transfer, requesting
1199 a single byte. */
1200 tlen = 1;
1201 offset = 0;
1202 errcode = target_read_memory (memaddr, buf, 1);
1203 if (errcode != 0)
1204 goto done;
1205 }
1206
1207 if (bufptr - buffer + tlen > buffer_allocated)
1208 {
1209 unsigned int bytes;
1210
1211 bytes = bufptr - buffer;
1212 buffer_allocated *= 2;
1213 buffer = xrealloc (buffer, buffer_allocated);
1214 bufptr = buffer + bytes;
1215 }
1216
1217 for (i = 0; i < tlen; i++)
1218 {
1219 *bufptr++ = buf[i + offset];
1220 if (buf[i + offset] == '\000')
1221 {
1222 nbytes_read += i + 1;
1223 goto done;
1224 }
1225 }
1226
1227 memaddr += tlen;
1228 len -= tlen;
1229 nbytes_read += tlen;
1230 }
1231 done:
1232 *string = buffer;
1233 if (errnop != NULL)
1234 *errnop = errcode;
1235 return nbytes_read;
1236 }
1237
1238 struct target_section_table *
1239 target_get_section_table (struct target_ops *target)
1240 {
1241 struct target_ops *t;
1242
1243 if (targetdebug)
1244 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1245
1246 for (t = target; t != NULL; t = t->beneath)
1247 if (t->to_get_section_table != NULL)
1248 return (*t->to_get_section_table) (t);
1249
1250 return NULL;
1251 }
1252
1253 /* Find a section containing ADDR. */
1254
1255 struct target_section *
1256 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1257 {
1258 struct target_section_table *table = target_get_section_table (target);
1259 struct target_section *secp;
1260
1261 if (table == NULL)
1262 return NULL;
1263
1264 for (secp = table->sections; secp < table->sections_end; secp++)
1265 {
1266 if (addr >= secp->addr && addr < secp->endaddr)
1267 return secp;
1268 }
1269 return NULL;
1270 }
1271
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The lookup order is: unmapped overlay sections (reads), read-only
   executable sections if trust-readonly is on (reads), GDB's dcache
   when the region is cacheable, then the target stack itself.  */

static LONGEST
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     void *readbuf, const void *writebuf, ULONGEST memaddr,
		     LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return 0;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clip the request
     to the end of the region so the attribute checks below apply to
     the whole (partial) transfer.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	{
	  /* Replace breakpoint shadow bytes with the original memory
	     contents unless the caller asked to see them.  */
	  if (readbuf && !show_memory_breakpoints)
	    breakpoint_restore_shadows (readbuf, memaddr, reg_len);
	  return res;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  if (readbuf && !show_memory_breakpoints)
    breakpoint_restore_shadows (readbuf, memaddr, reg_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1440
/* Cleanup callback: restore show_memory_breakpoints to the value
   saved by make_show_memory_breakpoints_cleanup.  ARG carries the
   previous value smuggled through a void * cast.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1446
1447 struct cleanup *
1448 make_show_memory_breakpoints_cleanup (int show)
1449 {
1450 int current = show_memory_breakpoints;
1451
1452 show_memory_breakpoints = show;
1453 return make_cleanup (restore_show_memory_breakpoints,
1454 (void *) (uintptr_t) current);
1455 }
1456
/* For docs see target.h, to_xfer_partial.  This is the single entry
   point all partial transfers funnel through: it enforces the
   may-write-memory permission, routes memory objects to
   memory_xfer_partial, and optionally traces the transfer.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      /* Point MYADDR at whichever buffer holds the transferred bytes
	 so they can be dumped below.  */
      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      /* Start a new output line at each 16-byte host-address
		 boundary; abbreviate unless "set debug target" >= 2.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1536
1537 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1538 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1539 if any error occurs.
1540
1541 If an error occurs, no guarantee is made about the contents of the data at
1542 MYADDR. In particular, the caller should not depend upon partial reads
1543 filling the buffer with good data. There is no way for the caller to know
1544 how much good data might have been transfered anyway. Callers that can
1545 deal with partial reads should call target_read (which will retry until
1546 it makes no progress, and then return how much was transferred). */
1547
1548 int
1549 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1550 {
1551 /* Dispatch to the topmost target, not the flattened current_target.
1552 Memory accesses check target->to_has_(all_)memory, and the
1553 flattened target doesn't inherit those. */
1554 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1555 myaddr, memaddr, len) == len)
1556 return 0;
1557 else
1558 return EIO;
1559 }
1560
1561 /* Like target_read_memory, but specify explicitly that this is a read from
1562 the target's stack. This may trigger different cache behavior. */
1563
1564 int
1565 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1566 {
1567 /* Dispatch to the topmost target, not the flattened current_target.
1568 Memory accesses check target->to_has_(all_)memory, and the
1569 flattened target doesn't inherit those. */
1570
1571 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1572 myaddr, memaddr, len) == len)
1573 return 0;
1574 else
1575 return EIO;
1576 }
1577
1578 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1579 Returns either 0 for success or an errno value if any error occurs.
1580 If an error occurs, no guarantee is made about how much data got written.
1581 Callers that can deal with partial writes should call target_write. */
1582
1583 int
1584 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1585 {
1586 /* Dispatch to the topmost target, not the flattened current_target.
1587 Memory accesses check target->to_has_(all_)memory, and the
1588 flattened target doesn't inherit those. */
1589 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1590 myaddr, memaddr, len) == len)
1591 return 0;
1592 else
1593 return EIO;
1594 }
1595
/* Fetch the target's memory map.  Returns NULL if no target layer
   provides one, if the target reports none, or if the reported
   regions overlap.  The regions are returned sorted by address and
   numbered for the "mem" commands.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target layer that can supply a memory map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by address so the overlap detection below is a simple
     adjacent-pair comparison.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1643
1644 void
1645 target_flash_erase (ULONGEST address, LONGEST length)
1646 {
1647 struct target_ops *t;
1648
1649 for (t = current_target.beneath; t != NULL; t = t->beneath)
1650 if (t->to_flash_erase != NULL)
1651 {
1652 if (targetdebug)
1653 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1654 hex_string (address), phex (length, 0));
1655 t->to_flash_erase (t, address, length);
1656 return;
1657 }
1658
1659 tcomplain ();
1660 }
1661
1662 void
1663 target_flash_done (void)
1664 {
1665 struct target_ops *t;
1666
1667 for (t = current_target.beneath; t != NULL; t = t->beneath)
1668 if (t->to_flash_done != NULL)
1669 {
1670 if (targetdebug)
1671 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1672 t->to_flash_done (t);
1673 return;
1674 }
1675
1676 tcomplain ();
1677 }
1678
/* "show" callback for the "trust-readonly-sections" setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1687
1688 /* More generic transfers. */
1689
/* Default to_xfer_partial implementation.  For memory objects it
   falls back to the target's deprecated_xfer_memory method when one
   exists; everything else is delegated to the target beneath.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
	{
	  /* deprecated_xfer_memory takes a non-const buffer, so copy
	     the data to a scratch buffer to preserve const
	     correctness of WRITEBUF.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
1731
1732 /* The xfer_partial handler for the topmost target. Unlike the default,
1733 it does not need to handle memory specially; it just passes all
1734 requests down the stack. */
1735
1736 static LONGEST
1737 current_xfer_partial (struct target_ops *ops, enum target_object object,
1738 const char *annex, gdb_byte *readbuf,
1739 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1740 {
1741 if (ops->beneath != NULL)
1742 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1743 readbuf, writebuf, offset, len);
1744 else
1745 return -1;
1746 }
1747
1748 /* Target vector read/write partial wrapper functions. */
1749
/* Read-side wrapper: perform a partial read of OBJECT/ANNEX from OPS
   into BUF by calling target_xfer_partial with no write buffer.  */

static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
1758
/* Write-side wrapper: perform a partial write of BUF to OBJECT/ANNEX
   on OPS by calling target_xfer_partial with no read buffer.  */

static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
1767
1768 /* Wrappers to perform the full transfer. */
1769
1770 /* For docs on target_read see target.h. */
1771
1772 LONGEST
1773 target_read (struct target_ops *ops,
1774 enum target_object object,
1775 const char *annex, gdb_byte *buf,
1776 ULONGEST offset, LONGEST len)
1777 {
1778 LONGEST xfered = 0;
1779
1780 while (xfered < len)
1781 {
1782 LONGEST xfer = target_read_partial (ops, object, annex,
1783 (gdb_byte *) buf + xfered,
1784 offset + xfered, len - xfered);
1785
1786 /* Call an observer, notifying them of the xfer progress? */
1787 if (xfer == 0)
1788 return xfered;
1789 if (xfer < 0)
1790 return -1;
1791 xfered += xfer;
1792 QUIT;
1793 }
1794 return len;
1795 }
1796
/* Assuming that the entire [begin, end) range of memory cannot be read,
   try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning) will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that are neither at the beginning nor at the end -- nothing is
   returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */
1817
1818 static void
1819 read_whatever_is_readable (struct target_ops *ops,
1820 ULONGEST begin, ULONGEST end,
1821 VEC(memory_read_result_s) **result)
1822 {
1823 gdb_byte *buf = xmalloc (end-begin);
1824 ULONGEST current_begin = begin;
1825 ULONGEST current_end = end;
1826 int forward;
1827 memory_read_result_s r;
1828
1829 /* If we previously failed to read 1 byte, nothing can be done here. */
1830 if (end - begin <= 1)
1831 return;
1832
1833 /* Check that either first or the last byte is readable, and give up
1834 if not. This heuristic is meant to permit reading accessible memory
1835 at the boundary of accessible region. */
1836 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1837 buf, begin, 1) == 1)
1838 {
1839 forward = 1;
1840 ++current_begin;
1841 }
1842 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1843 buf + (end-begin) - 1, end - 1, 1) == 1)
1844 {
1845 forward = 0;
1846 --current_end;
1847 }
1848 else
1849 {
1850 return;
1851 }
1852
1853 /* Loop invariant is that the [current_begin, current_end) was previously
1854 found to be not readable as a whole.
1855
1856 Note loop condition -- if the range has 1 byte, we can't divide the range
1857 so there's no point trying further. */
1858 while (current_end - current_begin > 1)
1859 {
1860 ULONGEST first_half_begin, first_half_end;
1861 ULONGEST second_half_begin, second_half_end;
1862 LONGEST xfer;
1863
1864 ULONGEST middle = current_begin + (current_end - current_begin)/2;
1865 if (forward)
1866 {
1867 first_half_begin = current_begin;
1868 first_half_end = middle;
1869 second_half_begin = middle;
1870 second_half_end = current_end;
1871 }
1872 else
1873 {
1874 first_half_begin = middle;
1875 first_half_end = current_end;
1876 second_half_begin = current_begin;
1877 second_half_end = middle;
1878 }
1879
1880 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1881 buf + (first_half_begin - begin),
1882 first_half_begin,
1883 first_half_end - first_half_begin);
1884
1885 if (xfer == first_half_end - first_half_begin)
1886 {
1887 /* This half reads up fine. So, the error must be in the
1888 other half. */
1889 current_begin = second_half_begin;
1890 current_end = second_half_end;
1891 }
1892 else
1893 {
1894 /* This half is not readable. Because we've tried one byte, we
1895 know some part of this half if actually redable. Go to the next
1896 iteration to divide again and try to read.
1897
1898 We don't handle the other half, because this function only tries
1899 to read a single readable subrange. */
1900 current_begin = first_half_begin;
1901 current_end = first_half_end;
1902 }
1903 }
1904
1905 if (forward)
1906 {
1907 /* The [begin, current_begin) range has been read. */
1908 r.begin = begin;
1909 r.end = current_begin;
1910 r.data = buf;
1911 }
1912 else
1913 {
1914 /* The [current_end, end) range has been read. */
1915 LONGEST rlen = end - current_end;
1916 r.data = xmalloc (rlen);
1917 memcpy (r.data, buf + current_end - begin, rlen);
1918 r.begin = current_end;
1919 r.end = end;
1920 xfree (buf);
1921 }
1922 VEC_safe_push(memory_read_result_s, (*result), &r);
1923 }
1924
1925 void
1926 free_memory_read_result_vector (void *x)
1927 {
1928 VEC(memory_read_result_s) *v = x;
1929 memory_read_result_s *current;
1930 int ix;
1931
1932 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1933 {
1934 xfree (current->data);
1935 }
1936 VEC_free (memory_read_result_s, v);
1937 }
1938
1939 VEC(memory_read_result_s) *
1940 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
1941 {
1942 VEC(memory_read_result_s) *result = 0;
1943
1944 LONGEST xfered = 0;
1945 while (xfered < len)
1946 {
1947 struct mem_region *region = lookup_mem_region (offset + xfered);
1948 LONGEST rlen;
1949
1950 /* If there is no explicit region, a fake one should be created. */
1951 gdb_assert (region);
1952
1953 if (region->hi == 0)
1954 rlen = len - xfered;
1955 else
1956 rlen = region->hi - offset;
1957
1958 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1959 {
1960 /* Cannot read this region. Note that we can end up here only
1961 if the region is explicitly marked inaccessible, or
1962 'inaccessible-by-default' is in effect. */
1963 xfered += rlen;
1964 }
1965 else
1966 {
1967 LONGEST to_read = min (len - xfered, rlen);
1968 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
1969
1970 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1971 (gdb_byte *) buffer,
1972 offset + xfered, to_read);
1973 /* Call an observer, notifying them of the xfer progress? */
1974 if (xfer <= 0)
1975 {
1976 /* Got an error reading full chunk. See if maybe we can read
1977 some subrange. */
1978 xfree (buffer);
1979 read_whatever_is_readable (ops, offset + xfered,
1980 offset + xfered + to_read, &result);
1981 xfered += to_read;
1982 }
1983 else
1984 {
1985 struct memory_read_result r;
1986 r.data = buffer;
1987 r.begin = offset + xfered;
1988 r.end = r.begin + xfer;
1989 VEC_safe_push (memory_read_result_s, result, &r);
1990 xfered += xfer;
1991 }
1992 QUIT;
1993 }
1994 }
1995 return result;
1996 }
1997
1998
1999 /* An alternative to target_write with progress callbacks. */
2000
2001 LONGEST
2002 target_write_with_progress (struct target_ops *ops,
2003 enum target_object object,
2004 const char *annex, const gdb_byte *buf,
2005 ULONGEST offset, LONGEST len,
2006 void (*progress) (ULONGEST, void *), void *baton)
2007 {
2008 LONGEST xfered = 0;
2009
2010 /* Give the progress callback a chance to set up. */
2011 if (progress)
2012 (*progress) (0, baton);
2013
2014 while (xfered < len)
2015 {
2016 LONGEST xfer = target_write_partial (ops, object, annex,
2017 (gdb_byte *) buf + xfered,
2018 offset + xfered, len - xfered);
2019
2020 if (xfer == 0)
2021 return xfered;
2022 if (xfer < 0)
2023 return -1;
2024
2025 if (progress)
2026 (*progress) (xfer, baton);
2027
2028 xfered += xfer;
2029 QUIT;
2030 }
2031 return len;
2032 }
2033
2034 /* For docs on target_write see target.h. */
2035
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* A plain write is a progress-reporting write without a callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2045
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  Note: when 0 is returned, *BUF_P is NOT set and no
   buffer is allocated; on error (-1) any buffer is freed here.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes of headroom at the end of the buffer for
	 the caller (e.g. a NUL terminator).  */
      n = target_read_partial (ops, object, annex, &buf[buf_pos],
			       buf_pos, buf_alloc - buf_pos - padding);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2104
2105 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2106 the size of the transferred data. See the declaration in "target.h"
2107 function for more information about the return value. */
2108
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding byte needed; callers treat the result as raw bytes.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2115
2116 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2117 returned as a string, allocated using xmalloc. If an error occurs
2118 or the transfer is unsupported, NULL is returned. Empty objects
2119 are returned as allocated but empty strings. A warning is issued
2120 if the result contains any embedded NUL bytes. */
2121
2122 char *
2123 target_read_stralloc (struct target_ops *ops, enum target_object object,
2124 const char *annex)
2125 {
2126 gdb_byte *buffer;
2127 LONGEST transferred;
2128
2129 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2130
2131 if (transferred < 0)
2132 return NULL;
2133
2134 if (transferred == 0)
2135 return xstrdup ("");
2136
2137 buffer[transferred] = 0;
2138 if (strlen (buffer) < transferred)
2139 warning (_("target object %d, annex %s, "
2140 "contained unexpected null characters"),
2141 (int) object, annex ? annex : "(none)");
2142
2143 return (char *) buffer;
2144 }
2145
2146 /* Memory transfer methods. */
2147
2148 void
2149 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2150 LONGEST len)
2151 {
2152 /* This method is used to read from an alternate, non-current
2153 target. This read must bypass the overlay support (as symbols
2154 don't match this target), and GDB's internal cache (wrong cache
2155 for this target). */
2156 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2157 != len)
2158 memory_error (EIO, addr);
2159 }
2160
/* Read LEN bytes at ADDR from the non-current target OPS (via
   get_target_memory, so overlays and the dcache are bypassed) and
   return them decoded as an unsigned integer in BYTE_ORDER.  LEN must
   not exceed sizeof (ULONGEST).  Errors out via memory_error on a
   short read.  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2171
2172 int
2173 target_insert_breakpoint (struct gdbarch *gdbarch,
2174 struct bp_target_info *bp_tgt)
2175 {
2176 if (!may_insert_breakpoints)
2177 {
2178 warning (_("May not insert breakpoints"));
2179 return 1;
2180 }
2181
2182 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2183 }
2184
2185 int
2186 target_remove_breakpoint (struct gdbarch *gdbarch,
2187 struct bp_target_info *bp_tgt)
2188 {
2189 /* This is kind of a weird case to handle, but the permission might
2190 have been changed after breakpoints were inserted - in which case
2191 we should just take the user literally and assume that any
2192 breakpoints should be left in place. */
2193 if (!may_insert_breakpoints)
2194 {
2195 warning (_("May not remove breakpoints"));
2196 return 1;
2197 }
2198
2199 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2200 }
2201
2202 static void
2203 target_info (char *args, int from_tty)
2204 {
2205 struct target_ops *t;
2206 int has_all_mem = 0;
2207
2208 if (symfile_objfile != NULL)
2209 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2210
2211 for (t = target_stack; t != NULL; t = t->beneath)
2212 {
2213 if (!(*t->to_has_memory) (t))
2214 continue;
2215
2216 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2217 continue;
2218 if (has_all_mem)
2219 printf_unfiltered (_("\tWhile running this, "
2220 "GDB does not access memory from...\n"));
2221 printf_unfiltered ("%s:\n", t->to_longname);
2222 (t->to_files_info) (t);
2223 has_all_mem = (*t->to_has_all_memory) (t);
2224 }
2225 }
2226
2227 /* This function is called before any new inferior is created, e.g.
2228 by running a program, attaching, or connecting to a target.
2229 It cleans up any state from previous invocations which might
2230 change between runs. This is a subset of what target_preopen
2231 resets (things which might change between targets). */
2232
2233 void
2234 target_pre_inferior (int from_tty)
2235 {
2236 /* Clear out solib state. Otherwise the solib state of the previous
2237 inferior might have survived and is entirely wrong for the new
2238 target. This has been observed on GNU/Linux using glibc 2.3. How
2239 to reproduce:
2240
2241 bash$ ./foo&
2242 [1] 4711
2243 bash$ ./foo&
2244 [1] 4712
2245 bash$ gdb ./foo
2246 [...]
2247 (gdb) attach 4711
2248 (gdb) detach
2249 (gdb) attach 4712
2250 Cannot access memory at address 0xdeadbeef
2251 */
2252
2253 /* In some OSs, the shared library list is the same/global/shared
2254 across inferiors. If code is shared between processes, so are
2255 memory regions and features. */
2256 if (!gdbarch_has_global_solist (target_gdbarch))
2257 {
2258 no_shared_libraries (NULL, from_tty);
2259
2260 invalidate_target_mem_regions ();
2261
2262 target_clear_description ();
2263 }
2264 }
2265
2266 /* Callback for iterate_over_inferiors. Gets rid of the given
2267 inferior. */
2268
2269 static int
2270 dispose_inferior (struct inferior *inf, void *args)
2271 {
2272 struct thread_info *thread;
2273
2274 thread = any_thread_of_process (inf->pid);
2275 if (thread)
2276 {
2277 switch_to_thread (thread->ptid);
2278
2279 /* Core inferiors actually should be detached, not killed. */
2280 if (target_has_execution)
2281 target_kill ();
2282 else
2283 target_detach (NULL, 0);
2284 }
2285
2286 return 0;
2287 }
2288
2289 /* This is to be called by the open routine before it does
2290 anything. */
2291
2292 void
2293 target_preopen (int from_tty)
2294 {
2295 dont_repeat ();
2296
2297 if (have_inferiors ())
2298 {
2299 if (!from_tty
2300 || !have_live_inferiors ()
2301 || query (_("A program is being debugged already. Kill it? ")))
2302 iterate_over_inferiors (dispose_inferior, NULL);
2303 else
2304 error (_("Program not killed."));
2305 }
2306
2307 /* Calling target_kill may remove the target from the stack. But if
2308 it doesn't (which seems like a win for UDI), remove it now. */
2309 /* Leave the exec target, though. The user may be switching from a
2310 live process to a core of the same program. */
2311 pop_all_targets_above (file_stratum, 0);
2312
2313 target_pre_inferior (from_tty);
2314 }
2315
2316 /* Detach a target after doing deferred register stores. */
2317
2318 void
2319 target_detach (char *args, int from_tty)
2320 {
2321 struct target_ops* t;
2322
2323 if (gdbarch_has_global_breakpoints (target_gdbarch))
2324 /* Don't remove global breakpoints here. They're removed on
2325 disconnection from the target. */
2326 ;
2327 else
2328 /* If we're in breakpoints-always-inserted mode, have to remove
2329 them before detaching. */
2330 remove_breakpoints_pid (PIDGET (inferior_ptid));
2331
2332 prepare_for_detach ();
2333
2334 for (t = current_target.beneath; t != NULL; t = t->beneath)
2335 {
2336 if (t->to_detach != NULL)
2337 {
2338 t->to_detach (t, args, from_tty);
2339 if (targetdebug)
2340 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2341 args, from_tty);
2342 return;
2343 }
2344 }
2345
2346 internal_error (__FILE__, __LINE__, "could not find a target to detach");
2347 }
2348
2349 void
2350 target_disconnect (char *args, int from_tty)
2351 {
2352 struct target_ops *t;
2353
2354 /* If we're in breakpoints-always-inserted mode or if breakpoints
2355 are global across processes, we have to remove them before
2356 disconnecting. */
2357 remove_breakpoints ();
2358
2359 for (t = current_target.beneath; t != NULL; t = t->beneath)
2360 if (t->to_disconnect != NULL)
2361 {
2362 if (targetdebug)
2363 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2364 args, from_tty);
2365 t->to_disconnect (t, args, from_tty);
2366 return;
2367 }
2368
2369 tcomplain ();
2370 }
2371
2372 ptid_t
2373 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2374 {
2375 struct target_ops *t;
2376
2377 for (t = current_target.beneath; t != NULL; t = t->beneath)
2378 {
2379 if (t->to_wait != NULL)
2380 {
2381 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2382
2383 if (targetdebug)
2384 {
2385 char *status_string;
2386
2387 status_string = target_waitstatus_to_string (status);
2388 fprintf_unfiltered (gdb_stdlog,
2389 "target_wait (%d, status) = %d, %s\n",
2390 PIDGET (ptid), PIDGET (retval),
2391 status_string);
2392 xfree (status_string);
2393 }
2394
2395 return retval;
2396 }
2397 }
2398
2399 noprocess ();
2400 }
2401
2402 char *
2403 target_pid_to_str (ptid_t ptid)
2404 {
2405 struct target_ops *t;
2406
2407 for (t = current_target.beneath; t != NULL; t = t->beneath)
2408 {
2409 if (t->to_pid_to_str != NULL)
2410 return (*t->to_pid_to_str) (t, ptid);
2411 }
2412
2413 return normal_pid_to_str (ptid);
2414 }
2415
2416 void
2417 target_resume (ptid_t ptid, int step, enum target_signal signal)
2418 {
2419 struct target_ops *t;
2420
2421 target_dcache_invalidate ();
2422
2423 for (t = current_target.beneath; t != NULL; t = t->beneath)
2424 {
2425 if (t->to_resume != NULL)
2426 {
2427 t->to_resume (t, ptid, step, signal);
2428 if (targetdebug)
2429 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2430 PIDGET (ptid),
2431 step ? "step" : "continue",
2432 target_signal_to_name (signal));
2433
2434 registers_changed_ptid (ptid);
2435 set_executing (ptid, 1);
2436 set_running (ptid, 1);
2437 clear_inline_frame_state (ptid);
2438 return;
2439 }
2440 }
2441
2442 noprocess ();
2443 }
2444 /* Look through the list of possible targets for a target that can
2445 follow forks. */
2446
2447 int
2448 target_follow_fork (int follow_child)
2449 {
2450 struct target_ops *t;
2451
2452 for (t = current_target.beneath; t != NULL; t = t->beneath)
2453 {
2454 if (t->to_follow_fork != NULL)
2455 {
2456 int retval = t->to_follow_fork (t, follow_child);
2457
2458 if (targetdebug)
2459 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2460 follow_child, retval);
2461 return retval;
2462 }
2463 }
2464
2465 /* Some target returned a fork event, but did not know how to follow it. */
2466 internal_error (__FILE__, __LINE__,
2467 "could not find a target to follow fork");
2468 }
2469
2470 void
2471 target_mourn_inferior (void)
2472 {
2473 struct target_ops *t;
2474
2475 for (t = current_target.beneath; t != NULL; t = t->beneath)
2476 {
2477 if (t->to_mourn_inferior != NULL)
2478 {
2479 t->to_mourn_inferior (t);
2480 if (targetdebug)
2481 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2482
2483 /* We no longer need to keep handles on any of the object files.
2484 Make sure to release them to avoid unnecessarily locking any
2485 of them while we're not actually debugging. */
2486 bfd_cache_close_all ();
2487
2488 return;
2489 }
2490 }
2491
2492 internal_error (__FILE__, __LINE__,
2493 "could not find a target to follow mourn inferior");
2494 }
2495
2496 /* Look for a target which can describe architectural features, starting
2497 from TARGET. If we find one, return its description. */
2498
2499 const struct target_desc *
2500 target_read_description (struct target_ops *target)
2501 {
2502 struct target_ops *t;
2503
2504 for (t = target; t != NULL; t = t->beneath)
2505 if (t->to_read_description != NULL)
2506 {
2507 const struct target_desc *tdesc;
2508
2509 tdesc = t->to_read_description (t);
2510 if (tdesc)
2511 return tdesc;
2512 }
2513
2514 return NULL;
2515 }
2516
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).  Searches
   SEARCH_SPACE_LEN bytes starting at START_ADDR for PATTERN
   (PATTERN_LEN bytes); returns 1 and stores the match address in
   *FOUND_ADDRP on success, 0 if not found, -1 on a memory read
   error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks:
     the tail of one chunk is kept and re-searched with the next.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so allocation failure can be reported
     with a search-specific message instead of aborting.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back to a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2621
2622 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2623 sequence of bytes in PATTERN with length PATTERN_LEN.
2624
2625 The result is 1 if found, 0 if not found, and -1 if there was an error
2626 requiring halting of the search (e.g. memory read error).
2627 If the pattern is found the address is recorded in FOUND_ADDRP. */
2628
2629 int
2630 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2631 const gdb_byte *pattern, ULONGEST pattern_len,
2632 CORE_ADDR *found_addrp)
2633 {
2634 struct target_ops *t;
2635 int found;
2636
2637 /* We don't use INHERIT to set current_target.to_search_memory,
2638 so we have to scan the target stack and handle targetdebug
2639 ourselves. */
2640
2641 if (targetdebug)
2642 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2643 hex_string (start_addr));
2644
2645 for (t = current_target.beneath; t != NULL; t = t->beneath)
2646 if (t->to_search_memory != NULL)
2647 break;
2648
2649 if (t != NULL)
2650 {
2651 found = t->to_search_memory (t, start_addr, search_space_len,
2652 pattern, pattern_len, found_addrp);
2653 }
2654 else
2655 {
2656 /* If a special version of to_search_memory isn't available, use the
2657 simple version. */
2658 found = simple_search_memory (current_target.beneath,
2659 start_addr, search_space_len,
2660 pattern, pattern_len, found_addrp);
2661 }
2662
2663 if (targetdebug)
2664 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2665
2666 return found;
2667 }
2668
2669 /* Look through the currently pushed targets. If none of them will
2670 be able to restart the currently running process, issue an error
2671 message. */
2672
2673 void
2674 target_require_runnable (void)
2675 {
2676 struct target_ops *t;
2677
2678 for (t = target_stack; t != NULL; t = t->beneath)
2679 {
2680 /* If this target knows how to create a new program, then
2681 assume we will still be able to after killing the current
2682 one. Either killing and mourning will not pop T, or else
2683 find_default_run_target will find it again. */
2684 if (t->to_create_inferior != NULL)
2685 return;
2686
2687 /* Do not worry about thread_stratum targets that can not
2688 create inferiors. Assume they will be pushed again if
2689 necessary, and continue to the process_stratum. */
2690 if (t->to_stratum == thread_stratum
2691 || t->to_stratum == arch_stratum)
2692 continue;
2693
2694 error (_("The \"%s\" target does not support \"run\". "
2695 "Try \"help target\" or \"continue\"."),
2696 t->to_shortname);
2697 }
2698
2699 /* This function is only called if the target is running. In that
2700 case there should have been a process_stratum target and it
2701 should either know how to create inferiors, or not... */
2702 internal_error (__FILE__, __LINE__, "No targets found");
2703 }
2704
2705 /* Look through the list of possible targets for a target that can
2706 execute a run or attach command without any other data. This is
2707 used to locate the default process stratum.
2708
2709 If DO_MESG is not NULL, the result is always valid (error() is
2710 called for errors); else, return NULL on error. */
2711
2712 static struct target_ops *
2713 find_default_run_target (char *do_mesg)
2714 {
2715 struct target_ops **t;
2716 struct target_ops *runable = NULL;
2717 int count;
2718
2719 count = 0;
2720
2721 for (t = target_structs; t < target_structs + target_struct_size;
2722 ++t)
2723 {
2724 if ((*t)->to_can_run && target_can_run (*t))
2725 {
2726 runable = *t;
2727 ++count;
2728 }
2729 }
2730
2731 if (count != 1)
2732 {
2733 if (do_mesg)
2734 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2735 else
2736 return NULL;
2737 }
2738
2739 return runable;
2740 }
2741
2742 void
2743 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2744 {
2745 struct target_ops *t;
2746
2747 t = find_default_run_target ("attach");
2748 (t->to_attach) (t, args, from_tty);
2749 return;
2750 }
2751
2752 void
2753 find_default_create_inferior (struct target_ops *ops,
2754 char *exec_file, char *allargs, char **env,
2755 int from_tty)
2756 {
2757 struct target_ops *t;
2758
2759 t = find_default_run_target ("run");
2760 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2761 return;
2762 }
2763
2764 static int
2765 find_default_can_async_p (void)
2766 {
2767 struct target_ops *t;
2768
2769 /* This may be called before the target is pushed on the stack;
2770 look for the default process stratum. If there's none, gdb isn't
2771 configured with a native debugger, and target remote isn't
2772 connected yet. */
2773 t = find_default_run_target (NULL);
2774 if (t && t->to_can_async_p)
2775 return (t->to_can_async_p) ();
2776 return 0;
2777 }
2778
2779 static int
2780 find_default_is_async_p (void)
2781 {
2782 struct target_ops *t;
2783
2784 /* This may be called before the target is pushed on the stack;
2785 look for the default process stratum. If there's none, gdb isn't
2786 configured with a native debugger, and target remote isn't
2787 connected yet. */
2788 t = find_default_run_target (NULL);
2789 if (t && t->to_is_async_p)
2790 return (t->to_is_async_p) ();
2791 return 0;
2792 }
2793
2794 static int
2795 find_default_supports_non_stop (void)
2796 {
2797 struct target_ops *t;
2798
2799 t = find_default_run_target (NULL);
2800 if (t && t->to_supports_non_stop)
2801 return (t->to_supports_non_stop) ();
2802 return 0;
2803 }
2804
2805 int
2806 target_supports_non_stop (void)
2807 {
2808 struct target_ops *t;
2809
2810 for (t = &current_target; t != NULL; t = t->beneath)
2811 if (t->to_supports_non_stop)
2812 return t->to_supports_non_stop ();
2813
2814 return 0;
2815 }
2816
2817
2818 char *
2819 target_get_osdata (const char *type)
2820 {
2821 struct target_ops *t;
2822
2823 /* If we're already connected to something that can get us OS
2824 related data, use it. Otherwise, try using the native
2825 target. */
2826 if (current_target.to_stratum >= process_stratum)
2827 t = current_target.beneath;
2828 else
2829 t = find_default_run_target ("get OS data");
2830
2831 if (!t)
2832 return NULL;
2833
2834 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2835 }
2836
2837 /* Determine the current address space of thread PTID. */
2838
2839 struct address_space *
2840 target_thread_address_space (ptid_t ptid)
2841 {
2842 struct address_space *aspace;
2843 struct inferior *inf;
2844 struct target_ops *t;
2845
2846 for (t = current_target.beneath; t != NULL; t = t->beneath)
2847 {
2848 if (t->to_thread_address_space != NULL)
2849 {
2850 aspace = t->to_thread_address_space (t, ptid);
2851 gdb_assert (aspace);
2852
2853 if (targetdebug)
2854 fprintf_unfiltered (gdb_stdlog,
2855 "target_thread_address_space (%s) = %d\n",
2856 target_pid_to_str (ptid),
2857 address_space_num (aspace));
2858 return aspace;
2859 }
2860 }
2861
2862 /* Fall-back to the "main" address space of the inferior. */
2863 inf = find_inferior_pid (ptid_get_pid (ptid));
2864
2865 if (inf == NULL || inf->aspace == NULL)
2866 internal_error (__FILE__, __LINE__,
2867 "Can't determine the current address space of thread %s\n",
2868 target_pid_to_str (ptid));
2869
2870 return inf->aspace;
2871 }
2872
/* Default to_region_ok_for_hw_watchpoint: accept a region no wider
   than a pointer on the current target architecture.  (Presumably a
   conservative stand-in for per-target debug-register limits —
   individual targets override this hook.)  */

static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
}
2878
/* Default to_watchpoint_addr_within_range: true iff ADDR lies within
   the half-open interval [START, START + LENGTH).  */

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
2886
/* Default to_thread_architecture: every thread shares the global
   target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch;
}
2892
/* Always return 0; cast into various target_ops hook slots (see
   init_dummy_target) as a trivial stub.  */

static int
return_zero (void)
{
  return 0;
}
2898
/* Always return 1; trivial stub for target_ops hook slots.  */

static int
return_one (void)
{
  return 1;
}
2904
/* Always return -1; trivial stub for target_ops hook slots.  */

static int
return_minus_one (void)
{
  return -1;
}
2910
2911 /* Find a single runnable target in the stack and return it. If for
2912 some reason there is more than one, return NULL. */
2913
2914 struct target_ops *
2915 find_run_target (void)
2916 {
2917 struct target_ops **t;
2918 struct target_ops *runable = NULL;
2919 int count;
2920
2921 count = 0;
2922
2923 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2924 {
2925 if ((*t)->to_can_run && target_can_run (*t))
2926 {
2927 runable = *t;
2928 ++count;
2929 }
2930 }
2931
2932 return (count == 1 ? runable : NULL);
2933 }
2934
2935 /*
2936 * Find the next target down the stack from the specified target.
2937 */
2938
2939 struct target_ops *
2940 find_target_beneath (struct target_ops *t)
2941 {
2942 return t->beneath;
2943 }
2944
2945 \f
2946 /* The inferior process has died. Long live the inferior! */
2947
2948 void
2949 generic_mourn_inferior (void)
2950 {
2951 ptid_t ptid;
2952
2953 ptid = inferior_ptid;
2954 inferior_ptid = null_ptid;
2955
2956 if (!ptid_equal (ptid, null_ptid))
2957 {
2958 int pid = ptid_get_pid (ptid);
2959 exit_inferior (pid);
2960 }
2961
2962 breakpoint_init_inferior (inf_exited);
2963 registers_changed ();
2964
2965 reopen_exec_file ();
2966 reinit_frame_cache ();
2967
2968 if (deprecated_detach_hook)
2969 deprecated_detach_hook ();
2970 }
2971 \f
2972 /* Helper function for child_wait and the derivatives of child_wait.
2973 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2974 translation of that in OURSTATUS. */
2975 void
2976 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
2977 {
2978 if (WIFEXITED (hoststatus))
2979 {
2980 ourstatus->kind = TARGET_WAITKIND_EXITED;
2981 ourstatus->value.integer = WEXITSTATUS (hoststatus);
2982 }
2983 else if (!WIFSTOPPED (hoststatus))
2984 {
2985 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2986 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
2987 }
2988 else
2989 {
2990 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2991 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
2992 }
2993 }
2994 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so the result is overwritten by the next call;
   callers must copy it if they need it to persist.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3006
/* to_pid_to_str for the dummy target: just the generic rendering.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3012
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error does not return.  The return statement only
     keeps the compiler satisfied about the int result.  */
  return 0;
}
3020
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error does not return.  */
  return NULL;
}
3028
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  /* tcomplain reports "not implemented"; the return is never
     reached.  */
  tcomplain ();
  return NULL;
}
3036
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3043
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the target stack;
   most hooks are stubbed with error-catchers or constant-returning
   functions.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Attach/run fall back to locating the default run target.  */
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* The dummy target has nothing: no memory, stack, registers, or
     execution.  The casts adapt the plain return_zero stub to the
     one-argument hook signatures.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3077 \f
/* Debug wrapper for to_open: forward to the real target's hook, then
   log the call to gdb_stdlog.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3085
/* Close TARG.  Prefers the extended close hook (which receives the
   target vector itself), falling back to the plain close hook when
   present.  */

void
target_close (struct target_ops *targ, int quitting)
{
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ, quitting);
  else if (targ->to_close != NULL)
    targ->to_close (quitting);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
}
3097
3098 void
3099 target_attach (char *args, int from_tty)
3100 {
3101 struct target_ops *t;
3102
3103 for (t = current_target.beneath; t != NULL; t = t->beneath)
3104 {
3105 if (t->to_attach != NULL)
3106 {
3107 t->to_attach (t, args, from_tty);
3108 if (targetdebug)
3109 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3110 args, from_tty);
3111 return;
3112 }
3113 }
3114
3115 internal_error (__FILE__, __LINE__,
3116 "could not find a target to attach");
3117 }
3118
3119 int
3120 target_thread_alive (ptid_t ptid)
3121 {
3122 struct target_ops *t;
3123
3124 for (t = current_target.beneath; t != NULL; t = t->beneath)
3125 {
3126 if (t->to_thread_alive != NULL)
3127 {
3128 int retval;
3129
3130 retval = t->to_thread_alive (t, ptid);
3131 if (targetdebug)
3132 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3133 PIDGET (ptid), retval);
3134
3135 return retval;
3136 }
3137 }
3138
3139 return 0;
3140 }
3141
3142 void
3143 target_find_new_threads (void)
3144 {
3145 struct target_ops *t;
3146
3147 for (t = current_target.beneath; t != NULL; t = t->beneath)
3148 {
3149 if (t->to_find_new_threads != NULL)
3150 {
3151 t->to_find_new_threads (t);
3152 if (targetdebug)
3153 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3154
3155 return;
3156 }
3157 }
3158 }
3159
3160 void
3161 target_stop (ptid_t ptid)
3162 {
3163 if (!may_stop)
3164 {
3165 warning (_("May not interrupt or stop the target, ignoring attempt"));
3166 return;
3167 }
3168
3169 (*current_target.to_stop) (ptid);
3170 }
3171
/* Debug wrapper for to_post_attach: forward to the real target's
   hook, then log the call to gdb_stdlog.  */

static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3179
3180 /* Return a pretty printed form of target_waitstatus.
3181 Space for the result is malloc'd, caller must free. */
3182
3183 char *
3184 target_waitstatus_to_string (const struct target_waitstatus *ws)
3185 {
3186 const char *kind_str = "status->kind = ";
3187
3188 switch (ws->kind)
3189 {
3190 case TARGET_WAITKIND_EXITED:
3191 return xstrprintf ("%sexited, status = %d",
3192 kind_str, ws->value.integer);
3193 case TARGET_WAITKIND_STOPPED:
3194 return xstrprintf ("%sstopped, signal = %s",
3195 kind_str, target_signal_to_name (ws->value.sig));
3196 case TARGET_WAITKIND_SIGNALLED:
3197 return xstrprintf ("%ssignalled, signal = %s",
3198 kind_str, target_signal_to_name (ws->value.sig));
3199 case TARGET_WAITKIND_LOADED:
3200 return xstrprintf ("%sloaded", kind_str);
3201 case TARGET_WAITKIND_FORKED:
3202 return xstrprintf ("%sforked", kind_str);
3203 case TARGET_WAITKIND_VFORKED:
3204 return xstrprintf ("%svforked", kind_str);
3205 case TARGET_WAITKIND_EXECD:
3206 return xstrprintf ("%sexecd", kind_str);
3207 case TARGET_WAITKIND_SYSCALL_ENTRY:
3208 return xstrprintf ("%sentered syscall", kind_str);
3209 case TARGET_WAITKIND_SYSCALL_RETURN:
3210 return xstrprintf ("%sexited syscall", kind_str);
3211 case TARGET_WAITKIND_SPURIOUS:
3212 return xstrprintf ("%sspurious", kind_str);
3213 case TARGET_WAITKIND_IGNORE:
3214 return xstrprintf ("%signore", kind_str);
3215 case TARGET_WAITKIND_NO_HISTORY:
3216 return xstrprintf ("%sno-history", kind_str);
3217 default:
3218 return xstrprintf ("%sunknown???", kind_str);
3219 }
3220 }
3221
/* Log register FUNC/REGNO traffic to gdb_stdlog: the register's name
   (or number), its raw bytes in hex, and - when it fits in a LONGEST -
   its value as both an address string and a decimal integer.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when it has one, else its number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in hex, in target storage order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* For values that fit in a LONGEST, also print a decoded form.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3258
3259 void
3260 target_fetch_registers (struct regcache *regcache, int regno)
3261 {
3262 struct target_ops *t;
3263
3264 for (t = current_target.beneath; t != NULL; t = t->beneath)
3265 {
3266 if (t->to_fetch_registers != NULL)
3267 {
3268 t->to_fetch_registers (t, regcache, regno);
3269 if (targetdebug)
3270 debug_print_register ("target_fetch_registers", regcache, regno);
3271 return;
3272 }
3273 }
3274 }
3275
3276 void
3277 target_store_registers (struct regcache *regcache, int regno)
3278 {
3279 struct target_ops *t;
3280
3281 if (!may_write_registers)
3282 error (_("Writing to registers is not allowed (regno %d)"), regno);
3283
3284 for (t = current_target.beneath; t != NULL; t = t->beneath)
3285 {
3286 if (t->to_store_registers != NULL)
3287 {
3288 t->to_store_registers (t, regcache, regno);
3289 if (targetdebug)
3290 {
3291 debug_print_register ("target_store_registers", regcache, regno);
3292 }
3293 return;
3294 }
3295 }
3296
3297 noprocess ();
3298 }
3299
3300 int
3301 target_core_of_thread (ptid_t ptid)
3302 {
3303 struct target_ops *t;
3304
3305 for (t = current_target.beneath; t != NULL; t = t->beneath)
3306 {
3307 if (t->to_core_of_thread != NULL)
3308 {
3309 int retval = t->to_core_of_thread (t, ptid);
3310
3311 if (targetdebug)
3312 fprintf_unfiltered (gdb_stdlog,
3313 "target_core_of_thread (%d) = %d\n",
3314 PIDGET (ptid), retval);
3315 return retval;
3316 }
3317 }
3318
3319 return -1;
3320 }
3321
3322 int
3323 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3324 {
3325 struct target_ops *t;
3326
3327 for (t = current_target.beneath; t != NULL; t = t->beneath)
3328 {
3329 if (t->to_verify_memory != NULL)
3330 {
3331 int retval = t->to_verify_memory (t, data, memaddr, size);
3332
3333 if (targetdebug)
3334 fprintf_unfiltered (gdb_stdlog,
3335 "target_verify_memory (%s, %s) = %d\n",
3336 paddress (target_gdbarch, memaddr),
3337 pulongest (size),
3338 retval);
3339 return retval;
3340 }
3341 }
3342
3343 tcomplain ();
3344 }
3345
3346 static void
3347 debug_to_prepare_to_store (struct regcache *regcache)
3348 {
3349 debug_target.to_prepare_to_store (regcache);
3350
3351 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
3352 }
3353
/* Log wrapper for the deprecated xfer_memory method: perform the
   transfer through the saved target vector, then write a trace line
   and, for successful transfers, a dump of the bytes moved.  Returns
   the underlying method's result (number of bytes transferred).  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump onto a new output line whenever the host
	     buffer address crosses a 16-byte boundary.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity below 2, elide everything after the
		 first line of bytes.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3394
3395 static void
3396 debug_to_files_info (struct target_ops *target)
3397 {
3398 debug_target.to_files_info (target);
3399
3400 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3401 }
3402
3403 static int
3404 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3405 struct bp_target_info *bp_tgt)
3406 {
3407 int retval;
3408
3409 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3410
3411 fprintf_unfiltered (gdb_stdlog,
3412 "target_insert_breakpoint (%s, xxx) = %ld\n",
3413 core_addr_to_string (bp_tgt->placed_address),
3414 (unsigned long) retval);
3415 return retval;
3416 }
3417
3418 static int
3419 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3420 struct bp_target_info *bp_tgt)
3421 {
3422 int retval;
3423
3424 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3425
3426 fprintf_unfiltered (gdb_stdlog,
3427 "target_remove_breakpoint (%s, xxx) = %ld\n",
3428 core_addr_to_string (bp_tgt->placed_address),
3429 (unsigned long) retval);
3430 return retval;
3431 }
3432
3433 static int
3434 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3435 {
3436 int retval;
3437
3438 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3439
3440 fprintf_unfiltered (gdb_stdlog,
3441 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3442 (unsigned long) type,
3443 (unsigned long) cnt,
3444 (unsigned long) from_tty,
3445 (unsigned long) retval);
3446 return retval;
3447 }
3448
3449 static int
3450 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3451 {
3452 CORE_ADDR retval;
3453
3454 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3455
3456 fprintf_unfiltered (gdb_stdlog,
3457 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3458 core_addr_to_string (addr), (unsigned long) len,
3459 core_addr_to_string (retval));
3460 return retval;
3461 }
3462
3463 static int
3464 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
3465 struct expression *cond)
3466 {
3467 int retval;
3468
3469 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
3470 rw, cond);
3471
3472 fprintf_unfiltered (gdb_stdlog,
3473 "target_can_accel_watchpoint_condition "
3474 "(%s, %d, %d, %s) = %ld\n",
3475 core_addr_to_string (addr), len, rw,
3476 host_address_to_string (cond), (unsigned long) retval);
3477 return retval;
3478 }
3479
3480 static int
3481 debug_to_stopped_by_watchpoint (void)
3482 {
3483 int retval;
3484
3485 retval = debug_target.to_stopped_by_watchpoint ();
3486
3487 fprintf_unfiltered (gdb_stdlog,
3488 "target_stopped_by_watchpoint () = %ld\n",
3489 (unsigned long) retval);
3490 return retval;
3491 }
3492
3493 static int
3494 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3495 {
3496 int retval;
3497
3498 retval = debug_target.to_stopped_data_address (target, addr);
3499
3500 fprintf_unfiltered (gdb_stdlog,
3501 "target_stopped_data_address ([%s]) = %ld\n",
3502 core_addr_to_string (*addr),
3503 (unsigned long)retval);
3504 return retval;
3505 }
3506
3507 static int
3508 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3509 CORE_ADDR addr,
3510 CORE_ADDR start, int length)
3511 {
3512 int retval;
3513
3514 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3515 start, length);
3516
3517 fprintf_filtered (gdb_stdlog,
3518 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3519 core_addr_to_string (addr), core_addr_to_string (start),
3520 length, retval);
3521 return retval;
3522 }
3523
3524 static int
3525 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3526 struct bp_target_info *bp_tgt)
3527 {
3528 int retval;
3529
3530 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3531
3532 fprintf_unfiltered (gdb_stdlog,
3533 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3534 core_addr_to_string (bp_tgt->placed_address),
3535 (unsigned long) retval);
3536 return retval;
3537 }
3538
3539 static int
3540 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3541 struct bp_target_info *bp_tgt)
3542 {
3543 int retval;
3544
3545 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3546
3547 fprintf_unfiltered (gdb_stdlog,
3548 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3549 core_addr_to_string (bp_tgt->placed_address),
3550 (unsigned long) retval);
3551 return retval;
3552 }
3553
3554 static int
3555 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
3556 struct expression *cond)
3557 {
3558 int retval;
3559
3560 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
3561
3562 fprintf_unfiltered (gdb_stdlog,
3563 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3564 core_addr_to_string (addr), len, type,
3565 host_address_to_string (cond), (unsigned long) retval);
3566 return retval;
3567 }
3568
3569 static int
3570 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
3571 struct expression *cond)
3572 {
3573 int retval;
3574
3575 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
3576
3577 fprintf_unfiltered (gdb_stdlog,
3578 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3579 core_addr_to_string (addr), len, type,
3580 host_address_to_string (cond), (unsigned long) retval);
3581 return retval;
3582 }
3583
3584 static void
3585 debug_to_terminal_init (void)
3586 {
3587 debug_target.to_terminal_init ();
3588
3589 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
3590 }
3591
3592 static void
3593 debug_to_terminal_inferior (void)
3594 {
3595 debug_target.to_terminal_inferior ();
3596
3597 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
3598 }
3599
3600 static void
3601 debug_to_terminal_ours_for_output (void)
3602 {
3603 debug_target.to_terminal_ours_for_output ();
3604
3605 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
3606 }
3607
3608 static void
3609 debug_to_terminal_ours (void)
3610 {
3611 debug_target.to_terminal_ours ();
3612
3613 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
3614 }
3615
3616 static void
3617 debug_to_terminal_save_ours (void)
3618 {
3619 debug_target.to_terminal_save_ours ();
3620
3621 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
3622 }
3623
3624 static void
3625 debug_to_terminal_info (char *arg, int from_tty)
3626 {
3627 debug_target.to_terminal_info (arg, from_tty);
3628
3629 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
3630 from_tty);
3631 }
3632
3633 static void
3634 debug_to_load (char *args, int from_tty)
3635 {
3636 debug_target.to_load (args, from_tty);
3637
3638 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
3639 }
3640
3641 static int
3642 debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
3643 {
3644 int retval;
3645
3646 retval = debug_target.to_lookup_symbol (name, addrp);
3647
3648 fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);
3649
3650 return retval;
3651 }
3652
3653 static void
3654 debug_to_post_startup_inferior (ptid_t ptid)
3655 {
3656 debug_target.to_post_startup_inferior (ptid);
3657
3658 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
3659 PIDGET (ptid));
3660 }
3661
3662 static void
3663 debug_to_insert_fork_catchpoint (int pid)
3664 {
3665 debug_target.to_insert_fork_catchpoint (pid);
3666
3667 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d)\n",
3668 pid);
3669 }
3670
3671 static int
3672 debug_to_remove_fork_catchpoint (int pid)
3673 {
3674 int retval;
3675
3676 retval = debug_target.to_remove_fork_catchpoint (pid);
3677
3678 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3679 pid, retval);
3680
3681 return retval;
3682 }
3683
3684 static void
3685 debug_to_insert_vfork_catchpoint (int pid)
3686 {
3687 debug_target.to_insert_vfork_catchpoint (pid);
3688
3689 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d)\n",
3690 pid);
3691 }
3692
3693 static int
3694 debug_to_remove_vfork_catchpoint (int pid)
3695 {
3696 int retval;
3697
3698 retval = debug_target.to_remove_vfork_catchpoint (pid);
3699
3700 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3701 pid, retval);
3702
3703 return retval;
3704 }
3705
3706 static void
3707 debug_to_insert_exec_catchpoint (int pid)
3708 {
3709 debug_target.to_insert_exec_catchpoint (pid);
3710
3711 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d)\n",
3712 pid);
3713 }
3714
3715 static int
3716 debug_to_remove_exec_catchpoint (int pid)
3717 {
3718 int retval;
3719
3720 retval = debug_target.to_remove_exec_catchpoint (pid);
3721
3722 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3723 pid, retval);
3724
3725 return retval;
3726 }
3727
3728 static int
3729 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3730 {
3731 int has_exited;
3732
3733 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3734
3735 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3736 pid, wait_status, *exit_status, has_exited);
3737
3738 return has_exited;
3739 }
3740
3741 static int
3742 debug_to_can_run (void)
3743 {
3744 int retval;
3745
3746 retval = debug_target.to_can_run ();
3747
3748 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3749
3750 return retval;
3751 }
3752
3753 static void
3754 debug_to_notice_signals (ptid_t ptid)
3755 {
3756 debug_target.to_notice_signals (ptid);
3757
3758 fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
3759 PIDGET (ptid));
3760 }
3761
3762 static struct gdbarch *
3763 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3764 {
3765 struct gdbarch *retval;
3766
3767 retval = debug_target.to_thread_architecture (ops, ptid);
3768
3769 fprintf_unfiltered (gdb_stdlog,
3770 "target_thread_architecture (%s) = %s [%s]\n",
3771 target_pid_to_str (ptid),
3772 host_address_to_string (retval),
3773 gdbarch_bfd_arch_info (retval)->printable_name);
3774 return retval;
3775 }
3776
3777 static void
3778 debug_to_stop (ptid_t ptid)
3779 {
3780 debug_target.to_stop (ptid);
3781
3782 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
3783 target_pid_to_str (ptid));
3784 }
3785
3786 static void
3787 debug_to_rcmd (char *command,
3788 struct ui_file *outbuf)
3789 {
3790 debug_target.to_rcmd (command, outbuf);
3791 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
3792 }
3793
3794 static char *
3795 debug_to_pid_to_exec_file (int pid)
3796 {
3797 char *exec_file;
3798
3799 exec_file = debug_target.to_pid_to_exec_file (pid);
3800
3801 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3802 pid, exec_file);
3803
3804 return exec_file;
3805 }
3806
/* Install the debug_to_* logging wrappers: save a copy of the current
   (merged) target vector in debug_target so the wrappers can delegate
   to the real methods, then redirect the traceable methods of
   current_target through the wrappers, which echo each call to
   gdb_stdlog.  */
static void
setup_target_debug (void)
{
  /* Preserve the original vector; every wrapper calls through
     debug_target.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_lookup_symbol = debug_to_lookup_symbol;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_notice_signals = debug_to_notice_signals;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
3856 \f
3857
/* Help text shared by the "info target" and "info files" commands
   (both registered against target_info in initialize_targets).  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
3862
3863 static void
3864 do_monitor_command (char *cmd,
3865 int from_tty)
3866 {
3867 if ((current_target.to_rcmd
3868 == (void (*) (char *, struct ui_file *)) tcomplain)
3869 || (current_target.to_rcmd == debug_to_rcmd
3870 && (debug_target.to_rcmd
3871 == (void (*) (char *, struct ui_file *)) tcomplain)))
3872 error (_("\"monitor\" command not supported by this target."));
3873 target_rcmd (cmd, gdb_stdtarg);
3874 }
3875
3876 /* Print the name of each layers of our target stack. */
3877
3878 static void
3879 maintenance_print_target_stack (char *cmd, int from_tty)
3880 {
3881 struct target_ops *t;
3882
3883 printf_filtered (_("The current target stack is:\n"));
3884
3885 for (t = target_stack; t != NULL; t = t->beneath)
3886 {
3887 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3888 }
3889 }
3890
/* Controls if async mode is permitted.  This is the value the rest of
   GDB actually consults.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, linux_nat_async_permitted is *not* updated.  The staged
   value here is only copied into target_async_permitted when no live
   inferior exists.  */
static int target_async_permitted_1 = 0;
3897
3898 static void
3899 set_maintenance_target_async_permitted (char *args, int from_tty,
3900 struct cmd_list_element *c)
3901 {
3902 if (have_live_inferiors ())
3903 {
3904 target_async_permitted_1 = target_async_permitted;
3905 error (_("Cannot change this setting while the inferior is running."));
3906 }
3907
3908 target_async_permitted = target_async_permitted_1;
3909 }
3910
/* The "show target-async" callback: report the current setting.  */
static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
3920
/* Temporary copies of permission settings.  The "set may-*" commands
   write into these staging variables; the set callbacks below copy
   them into the real may_* flags, and update_target_permissions
   copies the real values back here when a change must be rejected.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
3929
3930 /* Make the user-set values match the real values again. */
3931
3932 void
3933 update_target_permissions (void)
3934 {
3935 may_write_registers_1 = may_write_registers;
3936 may_write_memory_1 = may_write_memory;
3937 may_insert_breakpoints_1 = may_insert_breakpoints;
3938 may_insert_tracepoints_1 = may_insert_tracepoints;
3939 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3940 may_stop_1 = may_stop;
3941 }
3942
3943 /* The one function handles (most of) the permission flags in the same
3944 way. */
3945
3946 static void
3947 set_target_permissions (char *args, int from_tty,
3948 struct cmd_list_element *c)
3949 {
3950 if (target_has_execution)
3951 {
3952 update_target_permissions ();
3953 error (_("Cannot change this setting while the inferior is running."));
3954 }
3955
3956 /* Make the real values match the user-changed values. */
3957 may_write_registers = may_write_registers_1;
3958 may_insert_breakpoints = may_insert_breakpoints_1;
3959 may_insert_tracepoints = may_insert_tracepoints_1;
3960 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3961 may_stop = may_stop_1;
3962 update_observer_mode ();
3963 }
3964
3965 /* Set memory write permission independently of observer mode. */
3966
3967 static void
3968 set_write_memory_permission (char *args, int from_tty,
3969 struct cmd_list_element *c)
3970 {
3971 /* Make the real values match the user-changed values. */
3972 may_write_memory = may_write_memory_1;
3973 update_observer_mode ();
3974 }
3975
3976
/* Module initialization: set up the dummy target at the bottom of the
   target stack and register all target-related commands and
   settings.  */
void
initialize_targets (void)
{
  /* The dummy target always sits at the bottom of the stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are two names for the same
     command.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions.  This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* The "may-*" permission settings below all stage their values in
     the *_1 variables and commit via set_target_permissions (or
     set_write_memory_permission for may-write-memory).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);


  /* Initialize the cache used for target memory accesses.  */
  target_dcache = dcache_init ();
}
This page took 0.11538 seconds and 4 git commands to generate.