1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static ptid_t
default_get_ada_task_ptid (struct target_ops
*self
,
63 static void tcomplain (void) ATTRIBUTE_NORETURN
;
65 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
67 static int return_zero (void);
69 static int return_minus_one (void);
71 static void *return_null (void);
73 void target_ignore (void);
75 static void target_command (char *, int);
77 static struct target_ops
*find_default_run_target (char *);
79 static target_xfer_partial_ftype default_xfer_partial
;
81 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
84 static int dummy_find_memory_regions (struct target_ops
*self
,
85 find_memory_region_ftype ignore1
,
88 static char *dummy_make_corefile_notes (struct target_ops
*self
,
89 bfd
*ignore1
, int *ignore2
);
91 static int find_default_can_async_p (struct target_ops
*ignore
);
93 static int find_default_is_async_p (struct target_ops
*ignore
);
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops
*self
);
98 #include "target-delegates.c"
100 static void init_dummy_target (void);
102 static struct target_ops debug_target
;
104 static void debug_to_open (char *, int);
106 static void debug_to_prepare_to_store (struct target_ops
*self
,
109 static void debug_to_files_info (struct target_ops
*);
111 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
112 struct bp_target_info
*);
114 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
115 struct bp_target_info
*);
117 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
120 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
122 struct bp_target_info
*);
124 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
126 struct bp_target_info
*);
128 static int debug_to_insert_watchpoint (struct target_ops
*self
,
130 struct expression
*);
132 static int debug_to_remove_watchpoint (struct target_ops
*self
,
134 struct expression
*);
136 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
138 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
139 CORE_ADDR
, CORE_ADDR
, int);
141 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
144 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
146 struct expression
*);
148 static void debug_to_terminal_init (struct target_ops
*self
);
150 static void debug_to_terminal_inferior (struct target_ops
*self
);
152 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
154 static void debug_to_terminal_save_ours (struct target_ops
*self
);
156 static void debug_to_terminal_ours (struct target_ops
*self
);
158 static void debug_to_load (struct target_ops
*self
, char *, int);
160 static int debug_to_can_run (struct target_ops
*self
);
162 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
164 /* Pointer to array of target architecture structures; the size of the
165 array; the current index into the array; the allocated size of the
167 struct target_ops
**target_structs
;
168 unsigned target_struct_size
;
169 unsigned target_struct_allocsize
;
170 #define DEFAULT_ALLOCSIZE 10
172 /* The initial current target, so that there is always a semi-valid
175 static struct target_ops dummy_target
;
177 /* Top of target stack. */
179 static struct target_ops
*target_stack
;
181 /* The target structure we are currently using to talk to a process
182 or file or whatever "inferior" we have. */
184 struct target_ops current_target
;
186 /* Command list for target. */
188 static struct cmd_list_element
*targetlist
= NULL
;
190 /* Nonzero if we should trust readonly sections from the
191 executable when reading memory. */
193 static int trust_readonly
= 0;
195 /* Nonzero if we should show true memory content including
196 memory breakpoint inserted by gdb. */
198 static int show_memory_breakpoints
= 0;
200 /* These globals control whether GDB attempts to perform these
201 operations; they are useful for targets that need to prevent
202 inadvertent disruption, such as in non-stop mode. */
204 int may_write_registers
= 1;
206 int may_write_memory
= 1;
208 int may_insert_breakpoints
= 1;
210 int may_insert_tracepoints
= 1;
212 int may_insert_fast_tracepoints
= 1;
216 /* Non-zero if we want to see trace of target level stuff. */
218 static unsigned int targetdebug
= 0;
220 show_targetdebug (struct ui_file
*file
, int from_tty
,
221 struct cmd_list_element
*c
, const char *value
)
223 fprintf_filtered (file
, _("Target debugging is %s.\n"), value
);
226 static void setup_target_debug (void);
228 /* The user just typed 'target' without the name of a target. */
231 target_command (char *arg
, int from_tty
)
233 fputs_filtered ("Argument required (target name). Try `help target'\n",
237 /* Default target_has_* methods for process_stratum targets. */
240 default_child_has_all_memory (struct target_ops
*ops
)
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid
, null_ptid
))
250 default_child_has_memory (struct target_ops
*ops
)
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid
, null_ptid
))
260 default_child_has_stack (struct target_ops
*ops
)
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid
, null_ptid
))
270 default_child_has_registers (struct target_ops
*ops
)
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid
, null_ptid
))
280 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
282 /* If there's no thread selected, then we can't make it run through
284 if (ptid_equal (the_ptid
, null_ptid
))
292 target_has_all_memory_1 (void)
294 struct target_ops
*t
;
296 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
297 if (t
->to_has_all_memory (t
))
304 target_has_memory_1 (void)
306 struct target_ops
*t
;
308 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
309 if (t
->to_has_memory (t
))
316 target_has_stack_1 (void)
318 struct target_ops
*t
;
320 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
321 if (t
->to_has_stack (t
))
328 target_has_registers_1 (void)
330 struct target_ops
*t
;
332 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
333 if (t
->to_has_registers (t
))
340 target_has_execution_1 (ptid_t the_ptid
)
342 struct target_ops
*t
;
344 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
345 if (t
->to_has_execution (t
, the_ptid
))
352 target_has_execution_current (void)
354 return target_has_execution_1 (inferior_ptid
);
357 /* Complete initialization of T. This ensures that various fields in
358 T are set, if needed by the target implementation. */
361 complete_target_initialization (struct target_ops
*t
)
363 /* Provide default values for all "must have" methods. */
364 if (t
->to_xfer_partial
== NULL
)
365 t
->to_xfer_partial
= default_xfer_partial
;
367 if (t
->to_has_all_memory
== NULL
)
368 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
370 if (t
->to_has_memory
== NULL
)
371 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
373 if (t
->to_has_stack
== NULL
)
374 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
376 if (t
->to_has_registers
== NULL
)
377 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
379 if (t
->to_has_execution
== NULL
)
380 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
382 install_delegators (t
);
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
390 add_target_with_completer (struct target_ops
*t
,
391 completer_ftype
*completer
)
393 struct cmd_list_element
*c
;
395 complete_target_initialization (t
);
399 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
400 target_structs
= (struct target_ops
**) xmalloc
401 (target_struct_allocsize
* sizeof (*target_structs
));
403 if (target_struct_size
>= target_struct_allocsize
)
405 target_struct_allocsize
*= 2;
406 target_structs
= (struct target_ops
**)
407 xrealloc ((char *) target_structs
,
408 target_struct_allocsize
* sizeof (*target_structs
));
410 target_structs
[target_struct_size
++] = t
;
412 if (targetlist
== NULL
)
413 add_prefix_cmd ("target", class_run
, target_command
, _("\
414 Connect to a target machine or process.\n\
415 The first argument is the type or protocol of the target machine.\n\
416 Remaining arguments are interpreted by the target protocol. For more\n\
417 information on the arguments for a particular protocol, type\n\
418 `help target ' followed by the protocol name."),
419 &targetlist
, "target ", 0, &cmdlist
);
420 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
422 if (completer
!= NULL
)
423 set_cmd_completer (c
, completer
);
426 /* Add a possible target architecture to the list. */
429 add_target (struct target_ops
*t
)
431 add_target_with_completer (t
, NULL
);
437 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
439 struct cmd_list_element
*c
;
442 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
444 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
445 alt
= xstrprintf ("target %s", t
->to_shortname
);
446 deprecate_cmd (c
, alt
);
459 struct target_ops
*t
;
461 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
462 if (t
->to_kill
!= NULL
)
465 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
475 target_load (char *arg
, int from_tty
)
477 target_dcache_invalidate ();
478 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
482 target_create_inferior (char *exec_file
, char *args
,
483 char **env
, int from_tty
)
485 struct target_ops
*t
;
487 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
489 if (t
->to_create_inferior
!= NULL
)
491 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
493 fprintf_unfiltered (gdb_stdlog
,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file
, args
, from_tty
);
500 internal_error (__FILE__
, __LINE__
,
501 _("could not find a target to create inferior"));
505 target_terminal_inferior (void)
507 /* A background resume (``run&'') should leave GDB in control of the
508 terminal. Use target_can_async_p, not target_is_async_p, since at
509 this point the target is not async yet. However, if sync_execution
510 is not set, we know it will become async prior to resume. */
511 if (target_can_async_p () && !sync_execution
)
514 /* If GDB is resuming the inferior in the foreground, install
515 inferior's terminal modes. */
516 (*current_target
.to_terminal_inferior
) (¤t_target
);
520 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
521 struct target_ops
*t
)
523 errno
= EIO
; /* Can't read/write this location. */
524 return 0; /* No bytes handled. */
530 error (_("You can't do that when your target is `%s'"),
531 current_target
.to_shortname
);
537 error (_("You can't do that without a process to debug."));
541 default_terminal_info (struct target_ops
*self
, const char *args
, int from_tty
)
543 printf_unfiltered (_("No saved terminal information.\n"));
546 /* A default implementation for the to_get_ada_task_ptid target method.
548 This function builds the PTID by using both LWP and TID as part of
549 the PTID lwp and tid elements. The pid used is the pid of the
553 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
555 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops
*self
)
561 if (!target_can_execute_reverse
)
563 else if (!target_can_async_p ())
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
570 /* Go through the target stack from top to bottom, copying over zero
571 entries in current_target, then filling in still empty entries. In
572 effect, we are doing class inheritance through the pushed target
575 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
576 is currently implemented, is that it discards any knowledge of
577 which target an inherited method originally belonged to.
578 Consequently, new target methods should instead explicitly and
579 locally search the target stack for the target that can handle the
583 update_current_target (void)
585 struct target_ops
*t
;
587 /* First, reset current's contents. */
588 memset (¤t_target
, 0, sizeof (current_target
));
590 /* Install the delegators. */
591 install_delegators (¤t_target
);
593 #define INHERIT(FIELD, TARGET) \
594 if (!current_target.FIELD) \
595 current_target.FIELD = (TARGET)->FIELD
597 for (t
= target_stack
; t
; t
= t
->beneath
)
599 INHERIT (to_shortname
, t
);
600 INHERIT (to_longname
, t
);
602 /* Do not inherit to_open. */
603 /* Do not inherit to_close. */
604 /* Do not inherit to_attach. */
605 /* Do not inherit to_post_attach. */
606 INHERIT (to_attach_no_wait
, t
);
607 /* Do not inherit to_detach. */
608 /* Do not inherit to_disconnect. */
609 /* Do not inherit to_resume. */
610 /* Do not inherit to_wait. */
611 /* Do not inherit to_fetch_registers. */
612 /* Do not inherit to_store_registers. */
613 /* Do not inherit to_prepare_to_store. */
614 INHERIT (deprecated_xfer_memory
, t
);
615 /* Do not inherit to_files_info. */
616 /* Do not inherit to_insert_breakpoint. */
617 /* Do not inherit to_remove_breakpoint. */
618 /* Do not inherit to_can_use_hw_breakpoint. */
619 /* Do not inherit to_insert_hw_breakpoint. */
620 /* Do not inherit to_remove_hw_breakpoint. */
621 /* Do not inherit to_ranged_break_num_registers. */
622 /* Do not inherit to_insert_watchpoint. */
623 /* Do not inherit to_remove_watchpoint. */
624 /* Do not inherit to_insert_mask_watchpoint. */
625 /* Do not inherit to_remove_mask_watchpoint. */
626 /* Do not inherit to_stopped_data_address. */
627 INHERIT (to_have_steppable_watchpoint
, t
);
628 INHERIT (to_have_continuable_watchpoint
, t
);
629 /* Do not inherit to_stopped_by_watchpoint. */
630 /* Do not inherit to_watchpoint_addr_within_range. */
631 /* Do not inherit to_region_ok_for_hw_watchpoint. */
632 /* Do not inherit to_can_accel_watchpoint_condition. */
633 /* Do not inherit to_masked_watch_num_registers. */
634 /* Do not inherit to_terminal_init. */
635 /* Do not inherit to_terminal_inferior. */
636 /* Do not inherit to_terminal_ours_for_output. */
637 /* Do not inherit to_terminal_ours. */
638 /* Do not inherit to_terminal_save_ours. */
639 /* Do not inherit to_terminal_info. */
640 /* Do not inherit to_kill. */
641 /* Do not inherit to_load. */
642 /* Do not inherit to_create_inferior. */
643 /* Do not inherit to_post_startup_inferior. */
644 /* Do not inherit to_insert_fork_catchpoint. */
645 /* Do not inherit to_remove_fork_catchpoint. */
646 /* Do not inherit to_insert_vfork_catchpoint. */
647 /* Do not inherit to_remove_vfork_catchpoint. */
648 /* Do not inherit to_follow_fork. */
649 /* Do not inherit to_insert_exec_catchpoint. */
650 /* Do not inherit to_remove_exec_catchpoint. */
651 /* Do not inherit to_set_syscall_catchpoint. */
652 /* Do not inherit to_has_exited. */
653 /* Do not inherit to_mourn_inferior. */
654 INHERIT (to_can_run
, t
);
655 /* Do not inherit to_pass_signals. */
656 /* Do not inherit to_program_signals. */
657 /* Do not inherit to_thread_alive. */
658 /* Do not inherit to_find_new_threads. */
659 /* Do not inherit to_pid_to_str. */
660 /* Do not inherit to_extra_thread_info. */
661 /* Do not inherit to_thread_name. */
662 INHERIT (to_stop
, t
);
663 /* Do not inherit to_xfer_partial. */
664 /* Do not inherit to_rcmd. */
665 /* Do not inherit to_pid_to_exec_file. */
666 /* Do not inherit to_log_command. */
667 INHERIT (to_stratum
, t
);
668 /* Do not inherit to_has_all_memory. */
669 /* Do not inherit to_has_memory. */
670 /* Do not inherit to_has_stack. */
671 /* Do not inherit to_has_registers. */
672 /* Do not inherit to_has_execution. */
673 INHERIT (to_has_thread_control
, t
);
674 /* Do not inherit to_can_async_p. */
675 /* Do not inherit to_is_async_p. */
676 /* Do not inherit to_async. */
677 /* Do not inherit to_find_memory_regions. */
678 /* Do not inherit to_make_corefile_notes. */
679 /* Do not inherit to_get_bookmark. */
680 /* Do not inherit to_goto_bookmark. */
681 /* Do not inherit to_get_thread_local_address. */
682 /* Do not inherit to_can_execute_reverse. */
683 /* Do not inherit to_execution_direction. */
684 /* Do not inherit to_thread_architecture. */
685 /* Do not inherit to_read_description. */
686 /* Do not inherit to_get_ada_task_ptid. */
687 /* Do not inherit to_search_memory. */
688 /* Do not inherit to_supports_multi_process. */
689 /* Do not inherit to_supports_enable_disable_tracepoint. */
690 /* Do not inherit to_supports_string_tracing. */
691 /* Do not inherit to_trace_init. */
692 /* Do not inherit to_download_tracepoint. */
693 /* Do not inherit to_can_download_tracepoint. */
694 /* Do not inherit to_download_trace_state_variable. */
695 /* Do not inherit to_enable_tracepoint. */
696 /* Do not inherit to_disable_tracepoint. */
697 /* Do not inherit to_trace_set_readonly_regions. */
698 /* Do not inherit to_trace_start. */
699 /* Do not inherit to_get_trace_status. */
700 /* Do not inherit to_get_tracepoint_status. */
701 /* Do not inherit to_trace_stop. */
702 /* Do not inherit to_trace_find. */
703 /* Do not inherit to_get_trace_state_variable_value. */
704 /* Do not inherit to_save_trace_data. */
705 INHERIT (to_upload_tracepoints
, t
);
706 INHERIT (to_upload_trace_state_variables
, t
);
707 INHERIT (to_get_raw_trace_data
, t
);
708 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
709 INHERIT (to_set_disconnected_tracing
, t
);
710 INHERIT (to_set_circular_trace_buffer
, t
);
711 INHERIT (to_set_trace_buffer_size
, t
);
712 INHERIT (to_set_trace_notes
, t
);
713 INHERIT (to_get_tib_address
, t
);
714 INHERIT (to_set_permissions
, t
);
715 INHERIT (to_static_tracepoint_marker_at
, t
);
716 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
717 INHERIT (to_traceframe_info
, t
);
718 INHERIT (to_use_agent
, t
);
719 INHERIT (to_can_use_agent
, t
);
720 INHERIT (to_augmented_libraries_svr4_read
, t
);
721 INHERIT (to_magic
, t
);
722 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
723 INHERIT (to_can_run_breakpoint_commands
, t
);
724 /* Do not inherit to_memory_map. */
725 /* Do not inherit to_flash_erase. */
726 /* Do not inherit to_flash_done. */
730 /* Clean up a target struct so it no longer has any zero pointers in
731 it. Some entries are defaulted to a method that print an error,
732 others are hard-wired to a standard recursive default. */
734 #define de_fault(field, value) \
735 if (!current_target.field) \
736 current_target.field = value
739 (void (*) (char *, int))
742 (void (*) (struct target_ops
*))
744 de_fault (deprecated_xfer_memory
,
745 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
746 struct mem_attrib
*, struct target_ops
*))
748 de_fault (to_can_run
,
749 (int (*) (struct target_ops
*))
752 (void (*) (struct target_ops
*, ptid_t
))
754 current_target
.to_read_description
= NULL
;
755 de_fault (to_upload_tracepoints
,
756 (int (*) (struct target_ops
*, struct uploaded_tp
**))
758 de_fault (to_upload_trace_state_variables
,
759 (int (*) (struct target_ops
*, struct uploaded_tsv
**))
761 de_fault (to_get_raw_trace_data
,
762 (LONGEST (*) (struct target_ops
*, gdb_byte
*, ULONGEST
, LONGEST
))
764 de_fault (to_get_min_fast_tracepoint_insn_len
,
765 (int (*) (struct target_ops
*))
767 de_fault (to_set_disconnected_tracing
,
768 (void (*) (struct target_ops
*, int))
770 de_fault (to_set_circular_trace_buffer
,
771 (void (*) (struct target_ops
*, int))
773 de_fault (to_set_trace_buffer_size
,
774 (void (*) (struct target_ops
*, LONGEST
))
776 de_fault (to_set_trace_notes
,
777 (int (*) (struct target_ops
*,
778 const char *, const char *, const char *))
780 de_fault (to_get_tib_address
,
781 (int (*) (struct target_ops
*, ptid_t
, CORE_ADDR
*))
783 de_fault (to_set_permissions
,
784 (void (*) (struct target_ops
*))
786 de_fault (to_static_tracepoint_marker_at
,
787 (int (*) (struct target_ops
*,
788 CORE_ADDR
, struct static_tracepoint_marker
*))
790 de_fault (to_static_tracepoint_markers_by_strid
,
791 (VEC(static_tracepoint_marker_p
) * (*) (struct target_ops
*,
794 de_fault (to_traceframe_info
,
795 (struct traceframe_info
* (*) (struct target_ops
*))
797 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
798 (int (*) (struct target_ops
*))
800 de_fault (to_can_run_breakpoint_commands
,
801 (int (*) (struct target_ops
*))
803 de_fault (to_use_agent
,
804 (int (*) (struct target_ops
*, int))
806 de_fault (to_can_use_agent
,
807 (int (*) (struct target_ops
*))
809 de_fault (to_augmented_libraries_svr4_read
,
810 (int (*) (struct target_ops
*))
815 /* Finally, position the target-stack beneath the squashed
816 "current_target". That way code looking for a non-inherited
817 target method can quickly and simply find it. */
818 current_target
.beneath
= target_stack
;
821 setup_target_debug ();
824 /* Push a new target type into the stack of the existing target accessors,
825 possibly superseding some of the existing accessors.
827 Rather than allow an empty stack, we always have the dummy target at
828 the bottom stratum, so we can call the function vectors without
832 push_target (struct target_ops
*t
)
834 struct target_ops
**cur
;
836 /* Check magic number. If wrong, it probably means someone changed
837 the struct definition, but not all the places that initialize one. */
838 if (t
->to_magic
!= OPS_MAGIC
)
840 fprintf_unfiltered (gdb_stderr
,
841 "Magic number of %s target struct wrong\n",
843 internal_error (__FILE__
, __LINE__
,
844 _("failed internal consistency check"));
847 /* Find the proper stratum to install this target in. */
848 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
850 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
854 /* If there's already targets at this stratum, remove them. */
855 /* FIXME: cagney/2003-10-15: I think this should be popping all
856 targets to CUR, and not just those at this stratum level. */
857 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
859 /* There's already something at this stratum level. Close it,
860 and un-hook it from the stack. */
861 struct target_ops
*tmp
= (*cur
);
863 (*cur
) = (*cur
)->beneath
;
868 /* We have removed all targets in our stratum, now add the new one. */
872 update_current_target ();
875 /* Remove a target_ops vector from the stack, wherever it may be.
876 Return how many times it was removed (0 or 1). */
879 unpush_target (struct target_ops
*t
)
881 struct target_ops
**cur
;
882 struct target_ops
*tmp
;
884 if (t
->to_stratum
== dummy_stratum
)
885 internal_error (__FILE__
, __LINE__
,
886 _("Attempt to unpush the dummy target"));
888 /* Look for the specified target. Note that we assume that a target
889 can only occur once in the target stack. */
891 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
897 /* If we don't find target_ops, quit. Only open targets should be
902 /* Unchain the target. */
904 (*cur
) = (*cur
)->beneath
;
907 update_current_target ();
909 /* Finally close the target. Note we do this after unchaining, so
910 any target method calls from within the target_close
911 implementation don't end up in T anymore. */
918 pop_all_targets_above (enum strata above_stratum
)
920 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
922 if (!unpush_target (target_stack
))
924 fprintf_unfiltered (gdb_stderr
,
925 "pop_all_targets couldn't find target %s\n",
926 target_stack
->to_shortname
);
927 internal_error (__FILE__
, __LINE__
,
928 _("failed internal consistency check"));
935 pop_all_targets (void)
937 pop_all_targets_above (dummy_stratum
);
940 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
943 target_is_pushed (struct target_ops
*t
)
945 struct target_ops
**cur
;
947 /* Check magic number. If wrong, it probably means someone changed
948 the struct definition, but not all the places that initialize one. */
949 if (t
->to_magic
!= OPS_MAGIC
)
951 fprintf_unfiltered (gdb_stderr
,
952 "Magic number of %s target struct wrong\n",
954 internal_error (__FILE__
, __LINE__
,
955 _("failed internal consistency check"));
958 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
965 /* Using the objfile specified in OBJFILE, find the address for the
966 current thread's thread-local storage with offset OFFSET. */
968 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
970 volatile CORE_ADDR addr
= 0;
971 struct target_ops
*target
;
973 for (target
= current_target
.beneath
;
975 target
= target
->beneath
)
977 if (target
->to_get_thread_local_address
!= NULL
)
982 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
984 ptid_t ptid
= inferior_ptid
;
985 volatile struct gdb_exception ex
;
987 TRY_CATCH (ex
, RETURN_MASK_ALL
)
991 /* Fetch the load module address for this objfile. */
992 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
994 /* If it's 0, throw the appropriate exception. */
996 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
997 _("TLS load module not found"));
999 addr
= target
->to_get_thread_local_address (target
, ptid
,
1002 /* If an error occurred, print TLS related messages here. Otherwise,
1003 throw the error to some higher catcher. */
1006 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1010 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1011 error (_("Cannot find thread-local variables "
1012 "in this thread library."));
1014 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1015 if (objfile_is_library
)
1016 error (_("Cannot find shared library `%s' in dynamic"
1017 " linker's load module list"), objfile_name (objfile
));
1019 error (_("Cannot find executable file `%s' in dynamic"
1020 " linker's load module list"), objfile_name (objfile
));
1022 case TLS_NOT_ALLOCATED_YET_ERROR
:
1023 if (objfile_is_library
)
1024 error (_("The inferior has not yet allocated storage for"
1025 " thread-local variables in\n"
1026 "the shared library `%s'\n"
1028 objfile_name (objfile
), target_pid_to_str (ptid
));
1030 error (_("The inferior has not yet allocated storage for"
1031 " thread-local variables in\n"
1032 "the executable `%s'\n"
1034 objfile_name (objfile
), target_pid_to_str (ptid
));
1036 case TLS_GENERIC_ERROR
:
1037 if (objfile_is_library
)
1038 error (_("Cannot find thread-local storage for %s, "
1039 "shared library %s:\n%s"),
1040 target_pid_to_str (ptid
),
1041 objfile_name (objfile
), ex
.message
);
1043 error (_("Cannot find thread-local storage for %s, "
1044 "executable file %s:\n%s"),
1045 target_pid_to_str (ptid
),
1046 objfile_name (objfile
), ex
.message
);
1049 throw_exception (ex
);
1054 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1055 TLS is an ABI-specific thing. But we don't do that yet. */
1057 error (_("Cannot find thread-local variables on this target"));
1063 target_xfer_status_to_string (enum target_xfer_status err
)
1065 #define CASE(X) case X: return #X
1068 CASE(TARGET_XFER_E_IO
);
1069 CASE(TARGET_XFER_E_UNAVAILABLE
);
1078 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1080 /* target_read_string -- read a null terminated string, up to LEN bytes,
1081 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1082 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1083 is responsible for freeing it. Return the number of bytes successfully
1087 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1089 int tlen
, offset
, i
;
1093 int buffer_allocated
;
1095 unsigned int nbytes_read
= 0;
1097 gdb_assert (string
);
1099 /* Small for testing. */
1100 buffer_allocated
= 4;
1101 buffer
= xmalloc (buffer_allocated
);
1106 tlen
= MIN (len
, 4 - (memaddr
& 3));
1107 offset
= memaddr
& 3;
1109 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1112 /* The transfer request might have crossed the boundary to an
1113 unallocated region of memory. Retry the transfer, requesting
1117 errcode
= target_read_memory (memaddr
, buf
, 1);
1122 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1126 bytes
= bufptr
- buffer
;
1127 buffer_allocated
*= 2;
1128 buffer
= xrealloc (buffer
, buffer_allocated
);
1129 bufptr
= buffer
+ bytes
;
1132 for (i
= 0; i
< tlen
; i
++)
1134 *bufptr
++ = buf
[i
+ offset
];
1135 if (buf
[i
+ offset
] == '\000')
1137 nbytes_read
+= i
+ 1;
1144 nbytes_read
+= tlen
;
1153 struct target_section_table
*
1154 target_get_section_table (struct target_ops
*target
)
1156 struct target_ops
*t
;
1159 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1161 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1162 if (t
->to_get_section_table
!= NULL
)
1163 return (*t
->to_get_section_table
) (t
);
1168 /* Find a section containing ADDR. */
1170 struct target_section
*
1171 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1173 struct target_section_table
*table
= target_get_section_table (target
);
1174 struct target_section
*secp
;
1179 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1181 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1187 /* Read memory from the live target, even if currently inspecting a
1188 traceframe. The return is the same as that of target_read. */
1190 static enum target_xfer_status
1191 target_read_live_memory (enum target_object object
,
1192 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1193 ULONGEST
*xfered_len
)
1195 enum target_xfer_status ret
;
1196 struct cleanup
*cleanup
;
1198 /* Switch momentarily out of tfind mode so to access live memory.
1199 Note that this must not clear global state, such as the frame
1200 cache, which must still remain valid for the previous traceframe.
1201 We may be _building_ the frame cache at this point. */
1202 cleanup
= make_cleanup_restore_traceframe_number ();
1203 set_traceframe_number (-1);
1205 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1206 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1208 do_cleanups (cleanup
);
1212 /* Using the set of read-only target sections of OPS, read live
1213 read-only memory. Note that the actual reads start from the
1214 top-most target again.
1216 For interface/parameters/return description see target.h,
1219 static enum target_xfer_status
1220 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1221 enum target_object object
,
1222 gdb_byte
*readbuf
, ULONGEST memaddr
,
1223 ULONGEST len
, ULONGEST
*xfered_len
)
1225 struct target_section
*secp
;
1226 struct target_section_table
*table
;
1228 secp
= target_section_by_addr (ops
, memaddr
);
1230 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1231 secp
->the_bfd_section
)
1234 struct target_section
*p
;
1235 ULONGEST memend
= memaddr
+ len
;
1237 table
= target_get_section_table (ops
);
1239 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1241 if (memaddr
>= p
->addr
)
1243 if (memend
<= p
->endaddr
)
1245 /* Entire transfer is within this section. */
1246 return target_read_live_memory (object
, memaddr
,
1247 readbuf
, len
, xfered_len
);
1249 else if (memaddr
>= p
->endaddr
)
1251 /* This section ends before the transfer starts. */
1256 /* This section overlaps the transfer. Just do half. */
1257 len
= p
->endaddr
- memaddr
;
1258 return target_read_live_memory (object
, memaddr
,
1259 readbuf
, len
, xfered_len
);
1265 return TARGET_XFER_EOF
;
1268 /* Read memory from more than one valid target. A core file, for
1269 instance, could have some of memory but delegate other bits to
1270 the target below it. So, we must manually try all targets. */
1272 static enum target_xfer_status
1273 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1274 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1275 ULONGEST
*xfered_len
)
1277 enum target_xfer_status res
;
1281 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1282 readbuf
, writebuf
, memaddr
, len
,
1284 if (res
== TARGET_XFER_OK
)
1287 /* Stop if the target reports that the memory is not available. */
1288 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1291 /* We want to continue past core files to executables, but not
1292 past a running target's memory. */
1293 if (ops
->to_has_all_memory (ops
))
1298 while (ops
!= NULL
);
1303 /* Perform a partial memory transfer.
1304 For docs see target.h, to_xfer_partial. */
1306 static enum target_xfer_status
1307 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1308 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1309 ULONGEST len
, ULONGEST
*xfered_len
)
1311 enum target_xfer_status res
;
1313 struct mem_region
*region
;
1314 struct inferior
*inf
;
1316 /* For accesses to unmapped overlay sections, read directly from
1317 files. Must do this first, as MEMADDR may need adjustment. */
1318 if (readbuf
!= NULL
&& overlay_debugging
)
1320 struct obj_section
*section
= find_pc_overlay (memaddr
);
1322 if (pc_in_unmapped_range (memaddr
, section
))
1324 struct target_section_table
*table
1325 = target_get_section_table (ops
);
1326 const char *section_name
= section
->the_bfd_section
->name
;
1328 memaddr
= overlay_mapped_address (memaddr
, section
);
1329 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1330 memaddr
, len
, xfered_len
,
1332 table
->sections_end
,
1337 /* Try the executable files, if "trust-readonly-sections" is set. */
1338 if (readbuf
!= NULL
&& trust_readonly
)
1340 struct target_section
*secp
;
1341 struct target_section_table
*table
;
1343 secp
= target_section_by_addr (ops
, memaddr
);
1345 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1346 secp
->the_bfd_section
)
1349 table
= target_get_section_table (ops
);
1350 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1351 memaddr
, len
, xfered_len
,
1353 table
->sections_end
,
1358 /* If reading unavailable memory in the context of traceframes, and
1359 this address falls within a read-only section, fallback to
1360 reading from live memory. */
1361 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1363 VEC(mem_range_s
) *available
;
1365 /* If we fail to get the set of available memory, then the
1366 target does not support querying traceframe info, and so we
1367 attempt reading from the traceframe anyway (assuming the
1368 target implements the old QTro packet then). */
1369 if (traceframe_available_memory (&available
, memaddr
, len
))
1371 struct cleanup
*old_chain
;
1373 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1375 if (VEC_empty (mem_range_s
, available
)
1376 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1378 /* Don't read into the traceframe's available
1380 if (!VEC_empty (mem_range_s
, available
))
1382 LONGEST oldlen
= len
;
1384 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1385 gdb_assert (len
<= oldlen
);
1388 do_cleanups (old_chain
);
1390 /* This goes through the topmost target again. */
1391 res
= memory_xfer_live_readonly_partial (ops
, object
,
1394 if (res
== TARGET_XFER_OK
)
1395 return TARGET_XFER_OK
;
1398 /* No use trying further, we know some memory starting
1399 at MEMADDR isn't available. */
1401 return TARGET_XFER_E_UNAVAILABLE
;
1405 /* Don't try to read more than how much is available, in
1406 case the target implements the deprecated QTro packet to
1407 cater for older GDBs (the target's knowledge of read-only
1408 sections may be outdated by now). */
1409 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1411 do_cleanups (old_chain
);
1415 /* Try GDB's internal data cache. */
1416 region
= lookup_mem_region (memaddr
);
1417 /* region->hi == 0 means there's no upper bound. */
1418 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1421 reg_len
= region
->hi
- memaddr
;
1423 switch (region
->attrib
.mode
)
1426 if (writebuf
!= NULL
)
1427 return TARGET_XFER_E_IO
;
1431 if (readbuf
!= NULL
)
1432 return TARGET_XFER_E_IO
;
1436 /* We only support writing to flash during "load" for now. */
1437 if (writebuf
!= NULL
)
1438 error (_("Writing to flash memory forbidden in this context"));
1442 return TARGET_XFER_E_IO
;
1445 if (!ptid_equal (inferior_ptid
, null_ptid
))
1446 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1451 /* The dcache reads whole cache lines; that doesn't play well
1452 with reading from a trace buffer, because reading outside of
1453 the collected memory range fails. */
1454 && get_traceframe_number () == -1
1455 && (region
->attrib
.cache
1456 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1457 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1459 DCACHE
*dcache
= target_dcache_get_or_init ();
1462 if (readbuf
!= NULL
)
1463 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1465 /* FIXME drow/2006-08-09: If we're going to preserve const
1466 correctness dcache_xfer_memory should take readbuf and
1468 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1471 return TARGET_XFER_E_IO
;
1474 *xfered_len
= (ULONGEST
) l
;
1475 return TARGET_XFER_OK
;
1479 /* If none of those methods found the memory we wanted, fall back
1480 to a target partial transfer. Normally a single call to
1481 to_xfer_partial is enough; if it doesn't recognize an object
1482 it will call the to_xfer_partial of the next target down.
1483 But for memory this won't do. Memory is the only target
1484 object which can be read from more than one valid target.
1485 A core file, for instance, could have some of memory but
1486 delegate other bits to the target below it. So, we must
1487 manually try all targets. */
1489 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1492 /* Make sure the cache gets updated no matter what - if we are writing
1493 to the stack. Even if this write is not tagged as such, we still need
1494 to update the cache. */
1496 if (res
== TARGET_XFER_OK
1499 && target_dcache_init_p ()
1500 && !region
->attrib
.cache
1501 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1502 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1504 DCACHE
*dcache
= target_dcache_get ();
1506 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1509 /* If we still haven't got anything, return the last error. We
1514 /* Perform a partial memory transfer. For docs see target.h,
1517 static enum target_xfer_status
1518 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1519 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1520 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1522 enum target_xfer_status res
;
1524 /* Zero length requests are ok and require no work. */
1526 return TARGET_XFER_EOF
;
1528 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1529 breakpoint insns, thus hiding out from higher layers whether
1530 there are software breakpoints inserted in the code stream. */
1531 if (readbuf
!= NULL
)
1533 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1536 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1537 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1542 struct cleanup
*old_chain
;
1544 /* A large write request is likely to be partially satisfied
1545 by memory_xfer_partial_1. We will continually malloc
1546 and free a copy of the entire write request for breakpoint
1547 shadow handling even though we only end up writing a small
1548 subset of it. Cap writes to 4KB to mitigate this. */
1549 len
= min (4096, len
);
1551 buf
= xmalloc (len
);
1552 old_chain
= make_cleanup (xfree
, buf
);
1553 memcpy (buf
, writebuf
, len
);
1555 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1556 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1559 do_cleanups (old_chain
);
1566 restore_show_memory_breakpoints (void *arg
)
1568 show_memory_breakpoints
= (uintptr_t) arg
;
1572 make_show_memory_breakpoints_cleanup (int show
)
1574 int current
= show_memory_breakpoints
;
1576 show_memory_breakpoints
= show
;
1577 return make_cleanup (restore_show_memory_breakpoints
,
1578 (void *) (uintptr_t) current
);
1581 /* For docs see target.h, to_xfer_partial. */
1583 enum target_xfer_status
1584 target_xfer_partial (struct target_ops
*ops
,
1585 enum target_object object
, const char *annex
,
1586 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1587 ULONGEST offset
, ULONGEST len
,
1588 ULONGEST
*xfered_len
)
1590 enum target_xfer_status retval
;
1592 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1594 /* Transfer is done when LEN is zero. */
1596 return TARGET_XFER_EOF
;
1598 if (writebuf
&& !may_write_memory
)
1599 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1600 core_addr_to_string_nz (offset
), plongest (len
));
1604 /* If this is a memory transfer, let the memory-specific code
1605 have a look at it instead. Memory transfers are more
1607 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1608 || object
== TARGET_OBJECT_CODE_MEMORY
)
1609 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1610 writebuf
, offset
, len
, xfered_len
);
1611 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1613 /* Request the normal memory object from other layers. */
1614 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1618 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1619 writebuf
, offset
, len
, xfered_len
);
1623 const unsigned char *myaddr
= NULL
;
1625 fprintf_unfiltered (gdb_stdlog
,
1626 "%s:target_xfer_partial "
1627 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1630 (annex
? annex
: "(null)"),
1631 host_address_to_string (readbuf
),
1632 host_address_to_string (writebuf
),
1633 core_addr_to_string_nz (offset
),
1634 pulongest (len
), retval
,
1635 pulongest (*xfered_len
));
1641 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1645 fputs_unfiltered (", bytes =", gdb_stdlog
);
1646 for (i
= 0; i
< *xfered_len
; i
++)
1648 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1650 if (targetdebug
< 2 && i
> 0)
1652 fprintf_unfiltered (gdb_stdlog
, " ...");
1655 fprintf_unfiltered (gdb_stdlog
, "\n");
1658 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1662 fputc_unfiltered ('\n', gdb_stdlog
);
1665 /* Check implementations of to_xfer_partial update *XFERED_LEN
1666 properly. Do assertion after printing debug messages, so that we
1667 can find more clues on assertion failure from debugging messages. */
1668 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1669 gdb_assert (*xfered_len
> 0);
1674 /* Read LEN bytes of target memory at address MEMADDR, placing the
1675 results in GDB's memory at MYADDR. Returns either 0 for success or
1676 TARGET_XFER_E_IO if any error occurs.
1678 If an error occurs, no guarantee is made about the contents of the data at
1679 MYADDR. In particular, the caller should not depend upon partial reads
1680 filling the buffer with good data. There is no way for the caller to know
1681 how much good data might have been transfered anyway. Callers that can
1682 deal with partial reads should call target_read (which will retry until
1683 it makes no progress, and then return how much was transferred). */
1686 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1688 /* Dispatch to the topmost target, not the flattened current_target.
1689 Memory accesses check target->to_has_(all_)memory, and the
1690 flattened target doesn't inherit those. */
1691 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1692 myaddr
, memaddr
, len
) == len
)
1695 return TARGET_XFER_E_IO
;
1698 /* Like target_read_memory, but specify explicitly that this is a read
1699 from the target's raw memory. That is, this read bypasses the
1700 dcache, breakpoint shadowing, etc. */
1703 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1705 /* See comment in target_read_memory about why the request starts at
1706 current_target.beneath. */
1707 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1708 myaddr
, memaddr
, len
) == len
)
1711 return TARGET_XFER_E_IO
;
1714 /* Like target_read_memory, but specify explicitly that this is a read from
1715 the target's stack. This may trigger different cache behavior. */
1718 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1720 /* See comment in target_read_memory about why the request starts at
1721 current_target.beneath. */
1722 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1723 myaddr
, memaddr
, len
) == len
)
1726 return TARGET_XFER_E_IO
;
1729 /* Like target_read_memory, but specify explicitly that this is a read from
1730 the target's code. This may trigger different cache behavior. */
1733 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1735 /* See comment in target_read_memory about why the request starts at
1736 current_target.beneath. */
1737 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1738 myaddr
, memaddr
, len
) == len
)
1741 return TARGET_XFER_E_IO
;
1744 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1745 Returns either 0 for success or TARGET_XFER_E_IO if any
1746 error occurs. If an error occurs, no guarantee is made about how
1747 much data got written. Callers that can deal with partial writes
1748 should call target_write. */
1751 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1753 /* See comment in target_read_memory about why the request starts at
1754 current_target.beneath. */
1755 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1756 myaddr
, memaddr
, len
) == len
)
1759 return TARGET_XFER_E_IO
;
1762 /* Write LEN bytes from MYADDR to target raw memory at address
1763 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1764 if any error occurs. If an error occurs, no guarantee is made
1765 about how much data got written. Callers that can deal with
1766 partial writes should call target_write. */
1769 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1771 /* See comment in target_read_memory about why the request starts at
1772 current_target.beneath. */
1773 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1774 myaddr
, memaddr
, len
) == len
)
1777 return TARGET_XFER_E_IO
;
1780 /* Fetch the target's memory map. */
1783 target_memory_map (void)
1785 VEC(mem_region_s
) *result
;
1786 struct mem_region
*last_one
, *this_one
;
1788 struct target_ops
*t
;
1791 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1793 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1794 if (t
->to_memory_map
!= NULL
)
1800 result
= t
->to_memory_map (t
);
1804 qsort (VEC_address (mem_region_s
, result
),
1805 VEC_length (mem_region_s
, result
),
1806 sizeof (struct mem_region
), mem_region_cmp
);
1808 /* Check that regions do not overlap. Simultaneously assign
1809 a numbering for the "mem" commands to use to refer to
1812 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1814 this_one
->number
= ix
;
1816 if (last_one
&& last_one
->hi
> this_one
->lo
)
1818 warning (_("Overlapping regions in memory map: ignoring"));
1819 VEC_free (mem_region_s
, result
);
1822 last_one
= this_one
;
1829 target_flash_erase (ULONGEST address
, LONGEST length
)
1831 struct target_ops
*t
;
1833 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1834 if (t
->to_flash_erase
!= NULL
)
1837 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1838 hex_string (address
), phex (length
, 0));
1839 t
->to_flash_erase (t
, address
, length
);
1847 target_flash_done (void)
1849 struct target_ops
*t
;
1851 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1852 if (t
->to_flash_done
!= NULL
)
1855 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1856 t
->to_flash_done (t
);
/* "show trust-readonly-sections" command callback: print the current
   mode.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
1872 /* More generic transfers. */
1874 static enum target_xfer_status
1875 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1876 const char *annex
, gdb_byte
*readbuf
,
1877 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1878 ULONGEST
*xfered_len
)
1880 if (object
== TARGET_OBJECT_MEMORY
1881 && ops
->deprecated_xfer_memory
!= NULL
)
1882 /* If available, fall back to the target's
1883 "deprecated_xfer_memory" method. */
1888 if (writebuf
!= NULL
)
1890 void *buffer
= xmalloc (len
);
1891 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1893 memcpy (buffer
, writebuf
, len
);
1894 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1895 1/*write*/, NULL
, ops
);
1896 do_cleanups (cleanup
);
1898 if (readbuf
!= NULL
)
1899 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1900 0/*read*/, NULL
, ops
);
1903 *xfered_len
= (ULONGEST
) xfered
;
1904 return TARGET_XFER_E_IO
;
1906 else if (xfered
== 0 && errno
== 0)
1907 /* "deprecated_xfer_memory" uses 0, cross checked against
1908 ERRNO as one indication of an error. */
1909 return TARGET_XFER_EOF
;
1911 return TARGET_XFER_E_IO
;
1915 gdb_assert (ops
->beneath
!= NULL
);
1916 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1917 readbuf
, writebuf
, offset
, len
,
1922 /* Target vector read/write partial wrapper functions. */
1924 static enum target_xfer_status
1925 target_read_partial (struct target_ops
*ops
,
1926 enum target_object object
,
1927 const char *annex
, gdb_byte
*buf
,
1928 ULONGEST offset
, ULONGEST len
,
1929 ULONGEST
*xfered_len
)
1931 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1935 static enum target_xfer_status
1936 target_write_partial (struct target_ops
*ops
,
1937 enum target_object object
,
1938 const char *annex
, const gdb_byte
*buf
,
1939 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1941 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1945 /* Wrappers to perform the full transfer. */
1947 /* For docs on target_read see target.h. */
1950 target_read (struct target_ops
*ops
,
1951 enum target_object object
,
1952 const char *annex
, gdb_byte
*buf
,
1953 ULONGEST offset
, LONGEST len
)
1957 while (xfered
< len
)
1959 ULONGEST xfered_len
;
1960 enum target_xfer_status status
;
1962 status
= target_read_partial (ops
, object
, annex
,
1963 (gdb_byte
*) buf
+ xfered
,
1964 offset
+ xfered
, len
- xfered
,
1967 /* Call an observer, notifying them of the xfer progress? */
1968 if (status
== TARGET_XFER_EOF
)
1970 else if (status
== TARGET_XFER_OK
)
1972 xfered
+= xfered_len
;
1982 /* Assuming that the entire [begin, end) range of memory cannot be
1983 read, try to read whatever subrange is possible to read.
1985 The function returns, in RESULT, either zero or one memory block.
1986 If there's a readable subrange at the beginning, it is completely
1987 read and returned. Any further readable subrange will not be read.
1988 Otherwise, if there's a readable subrange at the end, it will be
1989 completely read and returned. Any readable subranges before it
1990 (obviously, not starting at the beginning), will be ignored. In
1991 other cases -- either no readable subrange, or readable subrange(s)
1992 that is neither at the beginning, or end, nothing is returned.
1994 The purpose of this function is to handle a read across a boundary
1995 of accessible memory in a case when memory map is not available.
1996 The above restrictions are fine for this case, but will give
1997 incorrect results if the memory is 'patchy'. However, supporting
1998 'patchy' memory would require trying to read every single byte,
1999 and it seems unacceptable solution. Explicit memory map is
2000 recommended for this case -- and target_read_memory_robust will
2001 take care of reading multiple ranges then. */
2004 read_whatever_is_readable (struct target_ops
*ops
,
2005 ULONGEST begin
, ULONGEST end
,
2006 VEC(memory_read_result_s
) **result
)
2008 gdb_byte
*buf
= xmalloc (end
- begin
);
2009 ULONGEST current_begin
= begin
;
2010 ULONGEST current_end
= end
;
2012 memory_read_result_s r
;
2013 ULONGEST xfered_len
;
2015 /* If we previously failed to read 1 byte, nothing can be done here. */
2016 if (end
- begin
<= 1)
2022 /* Check that either first or the last byte is readable, and give up
2023 if not. This heuristic is meant to permit reading accessible memory
2024 at the boundary of accessible region. */
2025 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2026 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2031 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2032 buf
+ (end
-begin
) - 1, end
- 1, 1,
2033 &xfered_len
) == TARGET_XFER_OK
)
2044 /* Loop invariant is that the [current_begin, current_end) was previously
2045 found to be not readable as a whole.
2047 Note loop condition -- if the range has 1 byte, we can't divide the range
2048 so there's no point trying further. */
2049 while (current_end
- current_begin
> 1)
2051 ULONGEST first_half_begin
, first_half_end
;
2052 ULONGEST second_half_begin
, second_half_end
;
2054 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2058 first_half_begin
= current_begin
;
2059 first_half_end
= middle
;
2060 second_half_begin
= middle
;
2061 second_half_end
= current_end
;
2065 first_half_begin
= middle
;
2066 first_half_end
= current_end
;
2067 second_half_begin
= current_begin
;
2068 second_half_end
= middle
;
2071 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2072 buf
+ (first_half_begin
- begin
),
2074 first_half_end
- first_half_begin
);
2076 if (xfer
== first_half_end
- first_half_begin
)
2078 /* This half reads up fine. So, the error must be in the
2080 current_begin
= second_half_begin
;
2081 current_end
= second_half_end
;
2085 /* This half is not readable. Because we've tried one byte, we
2086 know some part of this half if actually redable. Go to the next
2087 iteration to divide again and try to read.
2089 We don't handle the other half, because this function only tries
2090 to read a single readable subrange. */
2091 current_begin
= first_half_begin
;
2092 current_end
= first_half_end
;
2098 /* The [begin, current_begin) range has been read. */
2100 r
.end
= current_begin
;
2105 /* The [current_end, end) range has been read. */
2106 LONGEST rlen
= end
- current_end
;
2108 r
.data
= xmalloc (rlen
);
2109 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2110 r
.begin
= current_end
;
2114 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2118 free_memory_read_result_vector (void *x
)
2120 VEC(memory_read_result_s
) *v
= x
;
2121 memory_read_result_s
*current
;
2124 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2126 xfree (current
->data
);
2128 VEC_free (memory_read_result_s
, v
);
2131 VEC(memory_read_result_s
) *
2132 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2134 VEC(memory_read_result_s
) *result
= 0;
2137 while (xfered
< len
)
2139 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2142 /* If there is no explicit region, a fake one should be created. */
2143 gdb_assert (region
);
2145 if (region
->hi
== 0)
2146 rlen
= len
- xfered
;
2148 rlen
= region
->hi
- offset
;
2150 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2152 /* Cannot read this region. Note that we can end up here only
2153 if the region is explicitly marked inaccessible, or
2154 'inaccessible-by-default' is in effect. */
2159 LONGEST to_read
= min (len
- xfered
, rlen
);
2160 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2162 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2163 (gdb_byte
*) buffer
,
2164 offset
+ xfered
, to_read
);
2165 /* Call an observer, notifying them of the xfer progress? */
2168 /* Got an error reading full chunk. See if maybe we can read
2171 read_whatever_is_readable (ops
, offset
+ xfered
,
2172 offset
+ xfered
+ to_read
, &result
);
2177 struct memory_read_result r
;
2179 r
.begin
= offset
+ xfered
;
2180 r
.end
= r
.begin
+ xfer
;
2181 VEC_safe_push (memory_read_result_s
, result
, &r
);
2191 /* An alternative to target_write with progress callbacks. */
2194 target_write_with_progress (struct target_ops
*ops
,
2195 enum target_object object
,
2196 const char *annex
, const gdb_byte
*buf
,
2197 ULONGEST offset
, LONGEST len
,
2198 void (*progress
) (ULONGEST
, void *), void *baton
)
2202 /* Give the progress callback a chance to set up. */
2204 (*progress
) (0, baton
);
2206 while (xfered
< len
)
2208 ULONGEST xfered_len
;
2209 enum target_xfer_status status
;
2211 status
= target_write_partial (ops
, object
, annex
,
2212 (gdb_byte
*) buf
+ xfered
,
2213 offset
+ xfered
, len
- xfered
,
2216 if (status
== TARGET_XFER_EOF
)
2218 if (TARGET_XFER_STATUS_ERROR_P (status
))
2221 gdb_assert (status
== TARGET_XFER_OK
);
2223 (*progress
) (xfered_len
, baton
);
2225 xfered
+= xfered_len
;
2231 /* For docs on target_write see target.h. */
2234 target_write (struct target_ops
*ops
,
2235 enum target_object object
,
2236 const char *annex
, const gdb_byte
*buf
,
2237 ULONGEST offset
, LONGEST len
)
2239 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2243 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2244 the size of the transferred data. PADDING additional bytes are
2245 available in *BUF_P. This is a helper function for
2246 target_read_alloc; see the declaration of that function for more
2250 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2251 const char *annex
, gdb_byte
**buf_p
, int padding
)
2253 size_t buf_alloc
, buf_pos
;
2256 /* This function does not have a length parameter; it reads the
2257 entire OBJECT). Also, it doesn't support objects fetched partly
2258 from one target and partly from another (in a different stratum,
2259 e.g. a core file and an executable). Both reasons make it
2260 unsuitable for reading memory. */
2261 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2263 /* Start by reading up to 4K at a time. The target will throttle
2264 this number down if necessary. */
2266 buf
= xmalloc (buf_alloc
);
2270 ULONGEST xfered_len
;
2271 enum target_xfer_status status
;
2273 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2274 buf_pos
, buf_alloc
- buf_pos
- padding
,
2277 if (status
== TARGET_XFER_EOF
)
2279 /* Read all there was. */
2286 else if (status
!= TARGET_XFER_OK
)
2288 /* An error occurred. */
2290 return TARGET_XFER_E_IO
;
2293 buf_pos
+= xfered_len
;
2295 /* If the buffer is filling up, expand it. */
2296 if (buf_alloc
< buf_pos
* 2)
2299 buf
= xrealloc (buf
, buf_alloc
);
2306 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2307 the size of the transferred data. See the declaration in "target.h"
2308 function for more information about the return value. */
2311 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2312 const char *annex
, gdb_byte
**buf_p
)
2314 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2317 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2318 returned as a string, allocated using xmalloc. If an error occurs
2319 or the transfer is unsupported, NULL is returned. Empty objects
2320 are returned as allocated but empty strings. A warning is issued
2321 if the result contains any embedded NUL bytes. */
2324 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2329 LONGEST i
, transferred
;
2331 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2332 bufstr
= (char *) buffer
;
2334 if (transferred
< 0)
2337 if (transferred
== 0)
2338 return xstrdup ("");
2340 bufstr
[transferred
] = 0;
2342 /* Check for embedded NUL bytes; but allow trailing NULs. */
2343 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2346 warning (_("target object %d, annex %s, "
2347 "contained unexpected null characters"),
2348 (int) object
, annex
? annex
: "(none)");
2355 /* Memory transfer methods. */
2358 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2361 /* This method is used to read from an alternate, non-current
2362 target. This read must bypass the overlay support (as symbols
2363 don't match this target), and GDB's internal cache (wrong cache
2364 for this target). */
2365 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2367 memory_error (TARGET_XFER_E_IO
, addr
);
2371 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2372 int len
, enum bfd_endian byte_order
)
2374 gdb_byte buf
[sizeof (ULONGEST
)];
2376 gdb_assert (len
<= sizeof (buf
));
2377 get_target_memory (ops
, addr
, buf
, len
);
2378 return extract_unsigned_integer (buf
, len
, byte_order
);
2384 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2385 struct bp_target_info
*bp_tgt
)
2387 if (!may_insert_breakpoints
)
2389 warning (_("May not insert breakpoints"));
2393 return current_target
.to_insert_breakpoint (¤t_target
,
2400 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2401 struct bp_target_info
*bp_tgt
)
2403 /* This is kind of a weird case to handle, but the permission might
2404 have been changed after breakpoints were inserted - in which case
2405 we should just take the user literally and assume that any
2406 breakpoints should be left in place. */
2407 if (!may_insert_breakpoints
)
2409 warning (_("May not remove breakpoints"));
2413 return current_target
.to_remove_breakpoint (¤t_target
,
2418 target_info (char *args
, int from_tty
)
2420 struct target_ops
*t
;
2421 int has_all_mem
= 0;
2423 if (symfile_objfile
!= NULL
)
2424 printf_unfiltered (_("Symbols from \"%s\".\n"),
2425 objfile_name (symfile_objfile
));
2427 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2429 if (!(*t
->to_has_memory
) (t
))
2432 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2435 printf_unfiltered (_("\tWhile running this, "
2436 "GDB does not access memory from...\n"));
2437 printf_unfiltered ("%s:\n", t
->to_longname
);
2438 (t
->to_files_info
) (t
);
2439 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2443 /* This function is called before any new inferior is created, e.g.
2444 by running a program, attaching, or connecting to a target.
2445 It cleans up any state from previous invocations which might
2446 change between runs. This is a subset of what target_preopen
2447 resets (things which might change between targets). */
2450 target_pre_inferior (int from_tty
)
2452 /* Clear out solib state. Otherwise the solib state of the previous
2453 inferior might have survived and is entirely wrong for the new
2454 target. This has been observed on GNU/Linux using glibc 2.3. How
2466 Cannot access memory at address 0xdeadbeef
2469 /* In some OSs, the shared library list is the same/global/shared
2470 across inferiors. If code is shared between processes, so are
2471 memory regions and features. */
2472 if (!gdbarch_has_global_solist (target_gdbarch ()))
2474 no_shared_libraries (NULL
, from_tty
);
2476 invalidate_target_mem_regions ();
2478 target_clear_description ();
2481 agent_capability_invalidate ();
2484 /* Callback for iterate_over_inferiors. Gets rid of the given
2488 dispose_inferior (struct inferior
*inf
, void *args
)
2490 struct thread_info
*thread
;
2492 thread
= any_thread_of_process (inf
->pid
);
2495 switch_to_thread (thread
->ptid
);
2497 /* Core inferiors actually should be detached, not killed. */
2498 if (target_has_execution
)
2501 target_detach (NULL
, 0);
2507 /* This is to be called by the open routine before it does
2511 target_preopen (int from_tty
)
2515 if (have_inferiors ())
2518 || !have_live_inferiors ()
2519 || query (_("A program is being debugged already. Kill it? ")))
2520 iterate_over_inferiors (dispose_inferior
, NULL
);
2522 error (_("Program not killed."));
2525 /* Calling target_kill may remove the target from the stack. But if
2526 it doesn't (which seems like a win for UDI), remove it now. */
2527 /* Leave the exec target, though. The user may be switching from a
2528 live process to a core of the same program. */
2529 pop_all_targets_above (file_stratum
);
2531 target_pre_inferior (from_tty
);
2534 /* Detach a target after doing deferred register stores. */
2537 target_detach (const char *args
, int from_tty
)
2539 struct target_ops
* t
;
2541 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2542 /* Don't remove global breakpoints here. They're removed on
2543 disconnection from the target. */
2546 /* If we're in breakpoints-always-inserted mode, have to remove
2547 them before detaching. */
2548 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2550 prepare_for_detach ();
2552 current_target
.to_detach (¤t_target
, args
, from_tty
);
2554 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2559 target_disconnect (char *args
, int from_tty
)
2561 struct target_ops
*t
;
2563 /* If we're in breakpoints-always-inserted mode or if breakpoints
2564 are global across processes, we have to remove them before
2566 remove_breakpoints ();
2568 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2569 if (t
->to_disconnect
!= NULL
)
2572 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2574 t
->to_disconnect (t
, args
, from_tty
);
2582 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2584 struct target_ops
*t
;
2585 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2590 char *status_string
;
2591 char *options_string
;
2593 status_string
= target_waitstatus_to_string (status
);
2594 options_string
= target_options_to_string (options
);
2595 fprintf_unfiltered (gdb_stdlog
,
2596 "target_wait (%d, status, options={%s})"
2598 ptid_get_pid (ptid
), options_string
,
2599 ptid_get_pid (retval
), status_string
);
2600 xfree (status_string
);
2601 xfree (options_string
);
2608 target_pid_to_str (ptid_t ptid
)
2610 struct target_ops
*t
;
2612 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2614 if (t
->to_pid_to_str
!= NULL
)
2615 return (*t
->to_pid_to_str
) (t
, ptid
);
2618 return normal_pid_to_str (ptid
);
2622 target_thread_name (struct thread_info
*info
)
2624 return current_target
.to_thread_name (¤t_target
, info
);
2628 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2630 struct target_ops
*t
;
2632 target_dcache_invalidate ();
2634 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2636 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2637 ptid_get_pid (ptid
),
2638 step
? "step" : "continue",
2639 gdb_signal_to_name (signal
));
2641 registers_changed_ptid (ptid
);
2642 set_executing (ptid
, 1);
2643 set_running (ptid
, 1);
2644 clear_inline_frame_state (ptid
);
2648 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2650 struct target_ops
*t
;
2652 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2654 if (t
->to_pass_signals
!= NULL
)
2660 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2663 for (i
= 0; i
< numsigs
; i
++)
2664 if (pass_signals
[i
])
2665 fprintf_unfiltered (gdb_stdlog
, " %s",
2666 gdb_signal_to_name (i
));
2668 fprintf_unfiltered (gdb_stdlog
, " })\n");
2671 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2678 target_program_signals (int numsigs
, unsigned char *program_signals
)
2680 struct target_ops
*t
;
2682 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2684 if (t
->to_program_signals
!= NULL
)
2690 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2693 for (i
= 0; i
< numsigs
; i
++)
2694 if (program_signals
[i
])
2695 fprintf_unfiltered (gdb_stdlog
, " %s",
2696 gdb_signal_to_name (i
));
2698 fprintf_unfiltered (gdb_stdlog
, " })\n");
2701 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2707 /* Look through the list of possible targets for a target that can
2711 target_follow_fork (int follow_child
, int detach_fork
)
2713 struct target_ops
*t
;
2715 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2717 if (t
->to_follow_fork
!= NULL
)
2719 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2722 fprintf_unfiltered (gdb_stdlog
,
2723 "target_follow_fork (%d, %d) = %d\n",
2724 follow_child
, detach_fork
, retval
);
2729 /* Some target returned a fork event, but did not know how to follow it. */
2730 internal_error (__FILE__
, __LINE__
,
2731 _("could not find a target to follow fork"));
2735 target_mourn_inferior (void)
2737 struct target_ops
*t
;
2739 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2741 if (t
->to_mourn_inferior
!= NULL
)
2743 t
->to_mourn_inferior (t
);
2745 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2747 /* We no longer need to keep handles on any of the object files.
2748 Make sure to release them to avoid unnecessarily locking any
2749 of them while we're not actually debugging. */
2750 bfd_cache_close_all ();
2756 internal_error (__FILE__
, __LINE__
,
2757 _("could not find a target to follow mourn inferior"));
2760 /* Look for a target which can describe architectural features, starting
2761 from TARGET. If we find one, return its description. */
2763 const struct target_desc
*
2764 target_read_description (struct target_ops
*target
)
2766 struct target_ops
*t
;
2768 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2769 if (t
->to_read_description
!= NULL
)
2771 const struct target_desc
*tdesc
;
2773 tdesc
= t
->to_read_description (t
);
2781 /* The default implementation of to_search_memory.
2782 This implements a basic search of memory, reading target memory and
2783 performing the search here (as opposed to performing the search in on the
2784 target side with, for example, gdbserver). */
2787 simple_search_memory (struct target_ops
*ops
,
2788 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2789 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2790 CORE_ADDR
*found_addrp
)
2792 /* NOTE: also defined in find.c testcase. */
2793 #define SEARCH_CHUNK_SIZE 16000
2794 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2795 /* Buffer to hold memory contents for searching. */
2796 gdb_byte
*search_buf
;
2797 unsigned search_buf_size
;
2798 struct cleanup
*old_cleanups
;
2800 search_buf_size
= chunk_size
+ pattern_len
- 1;
2802 /* No point in trying to allocate a buffer larger than the search space. */
2803 if (search_space_len
< search_buf_size
)
2804 search_buf_size
= search_space_len
;
2806 search_buf
= malloc (search_buf_size
);
2807 if (search_buf
== NULL
)
2808 error (_("Unable to allocate memory to perform the search."));
2809 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2811 /* Prime the search buffer. */
2813 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2814 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2816 warning (_("Unable to access %s bytes of target "
2817 "memory at %s, halting search."),
2818 pulongest (search_buf_size
), hex_string (start_addr
));
2819 do_cleanups (old_cleanups
);
2823 /* Perform the search.
2825 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2826 When we've scanned N bytes we copy the trailing bytes to the start and
2827 read in another N bytes. */
2829 while (search_space_len
>= pattern_len
)
2831 gdb_byte
*found_ptr
;
2832 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2834 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2835 pattern
, pattern_len
);
2837 if (found_ptr
!= NULL
)
2839 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2841 *found_addrp
= found_addr
;
2842 do_cleanups (old_cleanups
);
2846 /* Not found in this chunk, skip to next chunk. */
2848 /* Don't let search_space_len wrap here, it's unsigned. */
2849 if (search_space_len
>= chunk_size
)
2850 search_space_len
-= chunk_size
;
2852 search_space_len
= 0;
2854 if (search_space_len
>= pattern_len
)
2856 unsigned keep_len
= search_buf_size
- chunk_size
;
2857 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2860 /* Copy the trailing part of the previous iteration to the front
2861 of the buffer for the next iteration. */
2862 gdb_assert (keep_len
== pattern_len
- 1);
2863 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2865 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2867 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2868 search_buf
+ keep_len
, read_addr
,
2869 nr_to_read
) != nr_to_read
)
2871 warning (_("Unable to access %s bytes of target "
2872 "memory at %s, halting search."),
2873 plongest (nr_to_read
),
2874 hex_string (read_addr
));
2875 do_cleanups (old_cleanups
);
2879 start_addr
+= chunk_size
;
2885 do_cleanups (old_cleanups
);
2889 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2890 sequence of bytes in PATTERN with length PATTERN_LEN.
2892 The result is 1 if found, 0 if not found, and -1 if there was an error
2893 requiring halting of the search (e.g. memory read error).
2894 If the pattern is found the address is recorded in FOUND_ADDRP. */
2897 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2898 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2899 CORE_ADDR
*found_addrp
)
2901 struct target_ops
*t
;
2904 /* We don't use INHERIT to set current_target.to_search_memory,
2905 so we have to scan the target stack and handle targetdebug
2909 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2910 hex_string (start_addr
));
2912 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2913 if (t
->to_search_memory
!= NULL
)
2918 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2919 pattern
, pattern_len
, found_addrp
);
2923 /* If a special version of to_search_memory isn't available, use the
2925 found
= simple_search_memory (current_target
.beneath
,
2926 start_addr
, search_space_len
,
2927 pattern
, pattern_len
, found_addrp
);
2931 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2936 /* Look through the currently pushed targets. If none of them will
2937 be able to restart the currently running process, issue an error
2941 target_require_runnable (void)
2943 struct target_ops
*t
;
2945 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2947 /* If this target knows how to create a new program, then
2948 assume we will still be able to after killing the current
2949 one. Either killing and mourning will not pop T, or else
2950 find_default_run_target will find it again. */
2951 if (t
->to_create_inferior
!= NULL
)
2954 /* Do not worry about thread_stratum targets that can not
2955 create inferiors. Assume they will be pushed again if
2956 necessary, and continue to the process_stratum. */
2957 if (t
->to_stratum
== thread_stratum
2958 || t
->to_stratum
== arch_stratum
)
2961 error (_("The \"%s\" target does not support \"run\". "
2962 "Try \"help target\" or \"continue\"."),
2966 /* This function is only called if the target is running. In that
2967 case there should have been a process_stratum target and it
2968 should either know how to create inferiors, or not... */
2969 internal_error (__FILE__
, __LINE__
, _("No targets found"));
2972 /* Look through the list of possible targets for a target that can
2973 execute a run or attach command without any other data. This is
2974 used to locate the default process stratum.
2976 If DO_MESG is not NULL, the result is always valid (error() is
2977 called for errors); else, return NULL on error. */
2979 static struct target_ops
*
2980 find_default_run_target (char *do_mesg
)
2982 struct target_ops
**t
;
2983 struct target_ops
*runable
= NULL
;
2988 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
2991 if ((*t
)->to_can_run
&& target_can_run (*t
))
3001 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3010 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3012 struct target_ops
*t
;
3014 t
= find_default_run_target ("attach");
3015 (t
->to_attach
) (t
, args
, from_tty
);
3020 find_default_create_inferior (struct target_ops
*ops
,
3021 char *exec_file
, char *allargs
, char **env
,
3024 struct target_ops
*t
;
3026 t
= find_default_run_target ("run");
3027 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3032 find_default_can_async_p (struct target_ops
*ignore
)
3034 struct target_ops
*t
;
3036 /* This may be called before the target is pushed on the stack;
3037 look for the default process stratum. If there's none, gdb isn't
3038 configured with a native debugger, and target remote isn't
3040 t
= find_default_run_target (NULL
);
3041 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3042 return (t
->to_can_async_p
) (t
);
3047 find_default_is_async_p (struct target_ops
*ignore
)
3049 struct target_ops
*t
;
3051 /* This may be called before the target is pushed on the stack;
3052 look for the default process stratum. If there's none, gdb isn't
3053 configured with a native debugger, and target remote isn't
3055 t
= find_default_run_target (NULL
);
3056 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3057 return (t
->to_is_async_p
) (t
);
3062 find_default_supports_non_stop (struct target_ops
*self
)
3064 struct target_ops
*t
;
3066 t
= find_default_run_target (NULL
);
3067 if (t
&& t
->to_supports_non_stop
)
3068 return (t
->to_supports_non_stop
) (t
);
3073 target_supports_non_stop (void)
3075 struct target_ops
*t
;
3077 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3078 if (t
->to_supports_non_stop
)
3079 return t
->to_supports_non_stop (t
);
3084 /* Implement the "info proc" command. */
3087 target_info_proc (char *args
, enum info_proc_what what
)
3089 struct target_ops
*t
;
3091 /* If we're already connected to something that can get us OS
3092 related data, use it. Otherwise, try using the native
3094 if (current_target
.to_stratum
>= process_stratum
)
3095 t
= current_target
.beneath
;
3097 t
= find_default_run_target (NULL
);
3099 for (; t
!= NULL
; t
= t
->beneath
)
3101 if (t
->to_info_proc
!= NULL
)
3103 t
->to_info_proc (t
, args
, what
);
3106 fprintf_unfiltered (gdb_stdlog
,
3107 "target_info_proc (\"%s\", %d)\n", args
, what
);
3117 find_default_supports_disable_randomization (struct target_ops
*self
)
3119 struct target_ops
*t
;
3121 t
= find_default_run_target (NULL
);
3122 if (t
&& t
->to_supports_disable_randomization
)
3123 return (t
->to_supports_disable_randomization
) (t
);
3128 target_supports_disable_randomization (void)
3130 struct target_ops
*t
;
3132 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3133 if (t
->to_supports_disable_randomization
)
3134 return t
->to_supports_disable_randomization (t
);
3140 target_get_osdata (const char *type
)
3142 struct target_ops
*t
;
3144 /* If we're already connected to something that can get us OS
3145 related data, use it. Otherwise, try using the native
3147 if (current_target
.to_stratum
>= process_stratum
)
3148 t
= current_target
.beneath
;
3150 t
= find_default_run_target ("get OS data");
3155 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3158 /* Determine the current address space of thread PTID. */
3160 struct address_space
*
3161 target_thread_address_space (ptid_t ptid
)
3163 struct address_space
*aspace
;
3164 struct inferior
*inf
;
3165 struct target_ops
*t
;
3167 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3169 if (t
->to_thread_address_space
!= NULL
)
3171 aspace
= t
->to_thread_address_space (t
, ptid
);
3172 gdb_assert (aspace
);
3175 fprintf_unfiltered (gdb_stdlog
,
3176 "target_thread_address_space (%s) = %d\n",
3177 target_pid_to_str (ptid
),
3178 address_space_num (aspace
));
3183 /* Fall-back to the "main" address space of the inferior. */
3184 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3186 if (inf
== NULL
|| inf
->aspace
== NULL
)
3187 internal_error (__FILE__
, __LINE__
,
3188 _("Can't determine the current "
3189 "address space of thread %s\n"),
3190 target_pid_to_str (ptid
));
3196 /* Target file operations. */
3198 static struct target_ops
*
3199 default_fileio_target (void)
3201 /* If we're already connected to something that can perform
3202 file I/O, use it. Otherwise, try using the native target. */
3203 if (current_target
.to_stratum
>= process_stratum
)
3204 return current_target
.beneath
;
3206 return find_default_run_target ("file I/O");
3209 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3210 target file descriptor, or -1 if an error occurs (and set
3213 target_fileio_open (const char *filename
, int flags
, int mode
,
3216 struct target_ops
*t
;
3218 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3220 if (t
->to_fileio_open
!= NULL
)
3222 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3225 fprintf_unfiltered (gdb_stdlog
,
3226 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3227 filename
, flags
, mode
,
3228 fd
, fd
!= -1 ? 0 : *target_errno
);
3233 *target_errno
= FILEIO_ENOSYS
;
3237 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3238 Return the number of bytes written, or -1 if an error occurs
3239 (and set *TARGET_ERRNO). */
3241 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3242 ULONGEST offset
, int *target_errno
)
3244 struct target_ops
*t
;
3246 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3248 if (t
->to_fileio_pwrite
!= NULL
)
3250 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3254 fprintf_unfiltered (gdb_stdlog
,
3255 "target_fileio_pwrite (%d,...,%d,%s) "
3257 fd
, len
, pulongest (offset
),
3258 ret
, ret
!= -1 ? 0 : *target_errno
);
3263 *target_errno
= FILEIO_ENOSYS
;
3267 /* Read up to LEN bytes FD on the target into READ_BUF.
3268 Return the number of bytes read, or -1 if an error occurs
3269 (and set *TARGET_ERRNO). */
3271 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3272 ULONGEST offset
, int *target_errno
)
3274 struct target_ops
*t
;
3276 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3278 if (t
->to_fileio_pread
!= NULL
)
3280 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3284 fprintf_unfiltered (gdb_stdlog
,
3285 "target_fileio_pread (%d,...,%d,%s) "
3287 fd
, len
, pulongest (offset
),
3288 ret
, ret
!= -1 ? 0 : *target_errno
);
3293 *target_errno
= FILEIO_ENOSYS
;
3297 /* Close FD on the target. Return 0, or -1 if an error occurs
3298 (and set *TARGET_ERRNO). */
3300 target_fileio_close (int fd
, int *target_errno
)
3302 struct target_ops
*t
;
3304 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3306 if (t
->to_fileio_close
!= NULL
)
3308 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3311 fprintf_unfiltered (gdb_stdlog
,
3312 "target_fileio_close (%d) = %d (%d)\n",
3313 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3318 *target_errno
= FILEIO_ENOSYS
;
3322 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3323 occurs (and set *TARGET_ERRNO). */
3325 target_fileio_unlink (const char *filename
, int *target_errno
)
3327 struct target_ops
*t
;
3329 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3331 if (t
->to_fileio_unlink
!= NULL
)
3333 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3336 fprintf_unfiltered (gdb_stdlog
,
3337 "target_fileio_unlink (%s) = %d (%d)\n",
3338 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3343 *target_errno
= FILEIO_ENOSYS
;
3347 /* Read value of symbolic link FILENAME on the target. Return a
3348 null-terminated string allocated via xmalloc, or NULL if an error
3349 occurs (and set *TARGET_ERRNO). */
3351 target_fileio_readlink (const char *filename
, int *target_errno
)
3353 struct target_ops
*t
;
3355 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3357 if (t
->to_fileio_readlink
!= NULL
)
3359 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3362 fprintf_unfiltered (gdb_stdlog
,
3363 "target_fileio_readlink (%s) = %s (%d)\n",
3364 filename
, ret
? ret
: "(nil)",
3365 ret
? 0 : *target_errno
);
3370 *target_errno
= FILEIO_ENOSYS
;
3375 target_fileio_close_cleanup (void *opaque
)
3377 int fd
= *(int *) opaque
;
3380 target_fileio_close (fd
, &target_errno
);
3383 /* Read target file FILENAME. Store the result in *BUF_P and
3384 return the size of the transferred data. PADDING additional bytes are
3385 available in *BUF_P. This is a helper function for
3386 target_fileio_read_alloc; see the declaration of that function for more
3390 target_fileio_read_alloc_1 (const char *filename
,
3391 gdb_byte
**buf_p
, int padding
)
3393 struct cleanup
*close_cleanup
;
3394 size_t buf_alloc
, buf_pos
;
3400 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3404 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3406 /* Start by reading up to 4K at a time. The target will throttle
3407 this number down if necessary. */
3409 buf
= xmalloc (buf_alloc
);
3413 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3414 buf_alloc
- buf_pos
- padding
, buf_pos
,
3418 /* An error occurred. */
3419 do_cleanups (close_cleanup
);
3425 /* Read all there was. */
3426 do_cleanups (close_cleanup
);
3436 /* If the buffer is filling up, expand it. */
3437 if (buf_alloc
< buf_pos
* 2)
3440 buf
= xrealloc (buf
, buf_alloc
);
3447 /* Read target file FILENAME. Store the result in *BUF_P and return
3448 the size of the transferred data. See the declaration in "target.h"
3449 function for more information about the return value. */
3452 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3454 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3457 /* Read target file FILENAME. The result is NUL-terminated and
3458 returned as a string, allocated using xmalloc. If an error occurs
3459 or the transfer is unsupported, NULL is returned. Empty objects
3460 are returned as allocated but empty strings. A warning is issued
3461 if the result contains any embedded NUL bytes. */
3464 target_fileio_read_stralloc (const char *filename
)
3468 LONGEST i
, transferred
;
3470 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3471 bufstr
= (char *) buffer
;
3473 if (transferred
< 0)
3476 if (transferred
== 0)
3477 return xstrdup ("");
3479 bufstr
[transferred
] = 0;
3481 /* Check for embedded NUL bytes; but allow trailing NULs. */
3482 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3485 warning (_("target file %s "
3486 "contained unexpected null characters"),
3496 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3497 CORE_ADDR addr
, int len
)
3499 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3503 default_watchpoint_addr_within_range (struct target_ops
*target
,
3505 CORE_ADDR start
, int length
)
3507 return addr
>= start
&& addr
< start
+ length
;
3510 static struct gdbarch
*
3511 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3513 return target_gdbarch ();
3523 return_minus_one (void)
3535 * Find the next target down the stack from the specified target.
3539 find_target_beneath (struct target_ops
*t
)
3547 find_target_at (enum strata stratum
)
3549 struct target_ops
*t
;
3551 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3552 if (t
->to_stratum
== stratum
)
3559 /* The inferior process has died. Long live the inferior! */
3562 generic_mourn_inferior (void)
3566 ptid
= inferior_ptid
;
3567 inferior_ptid
= null_ptid
;
3569 /* Mark breakpoints uninserted in case something tries to delete a
3570 breakpoint while we delete the inferior's threads (which would
3571 fail, since the inferior is long gone). */
3572 mark_breakpoints_out ();
3574 if (!ptid_equal (ptid
, null_ptid
))
3576 int pid
= ptid_get_pid (ptid
);
3577 exit_inferior (pid
);
3580 /* Note this wipes step-resume breakpoints, so needs to be done
3581 after exit_inferior, which ends up referencing the step-resume
3582 breakpoints through clear_thread_inferior_resources. */
3583 breakpoint_init_inferior (inf_exited
);
3585 registers_changed ();
3587 reopen_exec_file ();
3588 reinit_frame_cache ();
3590 if (deprecated_detach_hook
)
3591 deprecated_detach_hook ();
3594 /* Convert a normal process ID to a string. Returns the string in a
3598 normal_pid_to_str (ptid_t ptid
)
3600 static char buf
[32];
3602 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3607 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3609 return normal_pid_to_str (ptid
);
3612 /* Error-catcher for target_find_memory_regions. */
3614 dummy_find_memory_regions (struct target_ops
*self
,
3615 find_memory_region_ftype ignore1
, void *ignore2
)
3617 error (_("Command not implemented for this target."));
3621 /* Error-catcher for target_make_corefile_notes. */
3623 dummy_make_corefile_notes (struct target_ops
*self
,
3624 bfd
*ignore1
, int *ignore2
)
3626 error (_("Command not implemented for this target."));
3630 /* Set up the handful of non-empty slots needed by the dummy target
3634 init_dummy_target (void)
3636 dummy_target
.to_shortname
= "None";
3637 dummy_target
.to_longname
= "None";
3638 dummy_target
.to_doc
= "";
3639 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3640 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3641 dummy_target
.to_supports_disable_randomization
3642 = find_default_supports_disable_randomization
;
3643 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3644 dummy_target
.to_stratum
= dummy_stratum
;
3645 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3646 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3647 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3648 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3649 dummy_target
.to_has_execution
3650 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3651 dummy_target
.to_magic
= OPS_MAGIC
;
3653 install_dummy_methods (&dummy_target
);
3657 debug_to_open (char *args
, int from_tty
)
3659 debug_target
.to_open (args
, from_tty
);
3661 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3665 target_close (struct target_ops
*targ
)
3667 gdb_assert (!target_is_pushed (targ
));
3669 if (targ
->to_xclose
!= NULL
)
3670 targ
->to_xclose (targ
);
3671 else if (targ
->to_close
!= NULL
)
3672 targ
->to_close (targ
);
3675 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3679 target_attach (char *args
, int from_tty
)
3681 current_target
.to_attach (¤t_target
, args
, from_tty
);
3683 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3688 target_thread_alive (ptid_t ptid
)
3690 struct target_ops
*t
;
3692 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3694 if (t
->to_thread_alive
!= NULL
)
3698 retval
= t
->to_thread_alive (t
, ptid
);
3700 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3701 ptid_get_pid (ptid
), retval
);
3711 target_find_new_threads (void)
3713 struct target_ops
*t
;
3715 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3717 if (t
->to_find_new_threads
!= NULL
)
3719 t
->to_find_new_threads (t
);
3721 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3729 target_stop (ptid_t ptid
)
3733 warning (_("May not interrupt or stop the target, ignoring attempt"));
3737 (*current_target
.to_stop
) (¤t_target
, ptid
);
3741 debug_to_post_attach (struct target_ops
*self
, int pid
)
3743 debug_target
.to_post_attach (&debug_target
, pid
);
3745 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3748 /* Concatenate ELEM to LIST, a comma separate list, and return the
3749 result. The LIST incoming argument is released. */
3752 str_comma_list_concat_elem (char *list
, const char *elem
)
3755 return xstrdup (elem
);
3757 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
3760 /* Helper for target_options_to_string. If OPT is present in
3761 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3762 Returns the new resulting string. OPT is removed from
3766 do_option (int *target_options
, char *ret
,
3767 int opt
, char *opt_str
)
3769 if ((*target_options
& opt
) != 0)
3771 ret
= str_comma_list_concat_elem (ret
, opt_str
);
3772 *target_options
&= ~opt
;
3779 target_options_to_string (int target_options
)
3783 #define DO_TARG_OPTION(OPT) \
3784 ret = do_option (&target_options, ret, OPT, #OPT)
3786 DO_TARG_OPTION (TARGET_WNOHANG
);
3788 if (target_options
!= 0)
3789 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3797 debug_print_register (const char * func
,
3798 struct regcache
*regcache
, int regno
)
3800 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3802 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3803 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3804 && gdbarch_register_name (gdbarch
, regno
) != NULL
3805 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3806 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3807 gdbarch_register_name (gdbarch
, regno
));
3809 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3810 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3812 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3813 int i
, size
= register_size (gdbarch
, regno
);
3814 gdb_byte buf
[MAX_REGISTER_SIZE
];
3816 regcache_raw_collect (regcache
, regno
, buf
);
3817 fprintf_unfiltered (gdb_stdlog
, " = ");
3818 for (i
= 0; i
< size
; i
++)
3820 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3822 if (size
<= sizeof (LONGEST
))
3824 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3826 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3827 core_addr_to_string_nz (val
), plongest (val
));
3830 fprintf_unfiltered (gdb_stdlog
, "\n");
3834 target_fetch_registers (struct regcache
*regcache
, int regno
)
3836 struct target_ops
*t
;
3838 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3840 if (t
->to_fetch_registers
!= NULL
)
3842 t
->to_fetch_registers (t
, regcache
, regno
);
3844 debug_print_register ("target_fetch_registers", regcache
, regno
);
3851 target_store_registers (struct regcache
*regcache
, int regno
)
3853 struct target_ops
*t
;
3855 if (!may_write_registers
)
3856 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3858 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3861 debug_print_register ("target_store_registers", regcache
, regno
);
3866 target_core_of_thread (ptid_t ptid
)
3868 struct target_ops
*t
;
3870 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3872 if (t
->to_core_of_thread
!= NULL
)
3874 int retval
= t
->to_core_of_thread (t
, ptid
);
3877 fprintf_unfiltered (gdb_stdlog
,
3878 "target_core_of_thread (%d) = %d\n",
3879 ptid_get_pid (ptid
), retval
);
3888 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3890 struct target_ops
*t
;
3892 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3894 if (t
->to_verify_memory
!= NULL
)
3896 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3899 fprintf_unfiltered (gdb_stdlog
,
3900 "target_verify_memory (%s, %s) = %d\n",
3901 paddress (target_gdbarch (), memaddr
),
3911 /* The documentation for this function is in its prototype declaration in
3915 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3917 struct target_ops
*t
;
3919 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3920 if (t
->to_insert_mask_watchpoint
!= NULL
)
3924 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
3927 fprintf_unfiltered (gdb_stdlog
, "\
3928 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3929 core_addr_to_string (addr
),
3930 core_addr_to_string (mask
), rw
, ret
);
3938 /* The documentation for this function is in its prototype declaration in
3942 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3944 struct target_ops
*t
;
3946 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3947 if (t
->to_remove_mask_watchpoint
!= NULL
)
3951 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
3954 fprintf_unfiltered (gdb_stdlog
, "\
3955 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3956 core_addr_to_string (addr
),
3957 core_addr_to_string (mask
), rw
, ret
);
3965 /* The documentation for this function is in its prototype declaration
3969 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3971 struct target_ops
*t
;
3973 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3974 if (t
->to_masked_watch_num_registers
!= NULL
)
3975 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
3980 /* The documentation for this function is in its prototype declaration
3984 target_ranged_break_num_registers (void)
3986 struct target_ops
*t
;
3988 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3989 if (t
->to_ranged_break_num_registers
!= NULL
)
3990 return t
->to_ranged_break_num_registers (t
);
3997 struct btrace_target_info
*
3998 target_enable_btrace (ptid_t ptid
)
4000 struct target_ops
*t
;
4002 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4003 if (t
->to_enable_btrace
!= NULL
)
4004 return t
->to_enable_btrace (t
, ptid
);
4013 target_disable_btrace (struct btrace_target_info
*btinfo
)
4015 struct target_ops
*t
;
4017 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4018 if (t
->to_disable_btrace
!= NULL
)
4020 t
->to_disable_btrace (t
, btinfo
);
4030 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4032 struct target_ops
*t
;
4034 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4035 if (t
->to_teardown_btrace
!= NULL
)
4037 t
->to_teardown_btrace (t
, btinfo
);
4047 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4048 struct btrace_target_info
*btinfo
,
4049 enum btrace_read_type type
)
4051 struct target_ops
*t
;
4053 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4054 if (t
->to_read_btrace
!= NULL
)
4055 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
4058 return BTRACE_ERR_NOT_SUPPORTED
;
4064 target_stop_recording (void)
4066 struct target_ops
*t
;
4068 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4069 if (t
->to_stop_recording
!= NULL
)
4071 t
->to_stop_recording (t
);
4075 /* This is optional. */
4081 target_info_record (void)
4083 struct target_ops
*t
;
4085 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4086 if (t
->to_info_record
!= NULL
)
4088 t
->to_info_record (t
);
4098 target_save_record (const char *filename
)
4100 struct target_ops
*t
;
4102 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4103 if (t
->to_save_record
!= NULL
)
4105 t
->to_save_record (t
, filename
);
4115 target_supports_delete_record (void)
4117 struct target_ops
*t
;
4119 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4120 if (t
->to_delete_record
!= NULL
)
4129 target_delete_record (void)
4131 struct target_ops
*t
;
4133 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4134 if (t
->to_delete_record
!= NULL
)
4136 t
->to_delete_record (t
);
4146 target_record_is_replaying (void)
4148 struct target_ops
*t
;
4150 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4151 if (t
->to_record_is_replaying
!= NULL
)
4152 return t
->to_record_is_replaying (t
);
4160 target_goto_record_begin (void)
4162 struct target_ops
*t
;
4164 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4165 if (t
->to_goto_record_begin
!= NULL
)
4167 t
->to_goto_record_begin (t
);
4177 target_goto_record_end (void)
4179 struct target_ops
*t
;
4181 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4182 if (t
->to_goto_record_end
!= NULL
)
4184 t
->to_goto_record_end (t
);
4194 target_goto_record (ULONGEST insn
)
4196 struct target_ops
*t
;
4198 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4199 if (t
->to_goto_record
!= NULL
)
4201 t
->to_goto_record (t
, insn
);
4211 target_insn_history (int size
, int flags
)
4213 struct target_ops
*t
;
4215 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4216 if (t
->to_insn_history
!= NULL
)
4218 t
->to_insn_history (t
, size
, flags
);
4228 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4230 struct target_ops
*t
;
4232 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4233 if (t
->to_insn_history_from
!= NULL
)
4235 t
->to_insn_history_from (t
, from
, size
, flags
);
4245 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4247 struct target_ops
*t
;
4249 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4250 if (t
->to_insn_history_range
!= NULL
)
4252 t
->to_insn_history_range (t
, begin
, end
, flags
);
4262 target_call_history (int size
, int flags
)
4264 struct target_ops
*t
;
4266 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4267 if (t
->to_call_history
!= NULL
)
4269 t
->to_call_history (t
, size
, flags
);
4279 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4281 struct target_ops
*t
;
4283 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4284 if (t
->to_call_history_from
!= NULL
)
4286 t
->to_call_history_from (t
, begin
, size
, flags
);
4296 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4298 struct target_ops
*t
;
4300 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4301 if (t
->to_call_history_range
!= NULL
)
4303 t
->to_call_history_range (t
, begin
, end
, flags
);
4311 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4313 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4315 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4320 const struct frame_unwind
*
4321 target_get_unwinder (void)
4323 struct target_ops
*t
;
4325 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4326 if (t
->to_get_unwinder
!= NULL
)
4327 return t
->to_get_unwinder
;
4334 const struct frame_unwind
*
4335 target_get_tailcall_unwinder (void)
4337 struct target_ops
*t
;
4339 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4340 if (t
->to_get_tailcall_unwinder
!= NULL
)
4341 return t
->to_get_tailcall_unwinder
;
4349 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4350 struct gdbarch
*gdbarch
)
4352 for (; ops
!= NULL
; ops
= ops
->beneath
)
4353 if (ops
->to_decr_pc_after_break
!= NULL
)
4354 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4356 return gdbarch_decr_pc_after_break (gdbarch
);
4362 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4364 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4368 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4369 int write
, struct mem_attrib
*attrib
,
4370 struct target_ops
*target
)
4374 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4377 fprintf_unfiltered (gdb_stdlog
,
4378 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4379 paddress (target_gdbarch (), memaddr
), len
,
4380 write
? "write" : "read", retval
);
4386 fputs_unfiltered (", bytes =", gdb_stdlog
);
4387 for (i
= 0; i
< retval
; i
++)
4389 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4391 if (targetdebug
< 2 && i
> 0)
4393 fprintf_unfiltered (gdb_stdlog
, " ...");
4396 fprintf_unfiltered (gdb_stdlog
, "\n");
4399 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4403 fputc_unfiltered ('\n', gdb_stdlog
);
4409 debug_to_files_info (struct target_ops
*target
)
4411 debug_target
.to_files_info (target
);
4413 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4417 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4418 struct bp_target_info
*bp_tgt
)
4422 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4424 fprintf_unfiltered (gdb_stdlog
,
4425 "target_insert_breakpoint (%s, xxx) = %ld\n",
4426 core_addr_to_string (bp_tgt
->placed_address
),
4427 (unsigned long) retval
);
4432 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4433 struct bp_target_info
*bp_tgt
)
4437 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4439 fprintf_unfiltered (gdb_stdlog
,
4440 "target_remove_breakpoint (%s, xxx) = %ld\n",
4441 core_addr_to_string (bp_tgt
->placed_address
),
4442 (unsigned long) retval
);
4447 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4448 int type
, int cnt
, int from_tty
)
4452 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4453 type
, cnt
, from_tty
);
4455 fprintf_unfiltered (gdb_stdlog
,
4456 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4457 (unsigned long) type
,
4458 (unsigned long) cnt
,
4459 (unsigned long) from_tty
,
4460 (unsigned long) retval
);
4465 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4466 CORE_ADDR addr
, int len
)
4470 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4473 fprintf_unfiltered (gdb_stdlog
,
4474 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4475 core_addr_to_string (addr
), (unsigned long) len
,
4476 core_addr_to_string (retval
));
4481 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4482 CORE_ADDR addr
, int len
, int rw
,
4483 struct expression
*cond
)
4487 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4491 fprintf_unfiltered (gdb_stdlog
,
4492 "target_can_accel_watchpoint_condition "
4493 "(%s, %d, %d, %s) = %ld\n",
4494 core_addr_to_string (addr
), len
, rw
,
4495 host_address_to_string (cond
), (unsigned long) retval
);
4500 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4504 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4506 fprintf_unfiltered (gdb_stdlog
,
4507 "target_stopped_by_watchpoint () = %ld\n",
4508 (unsigned long) retval
);
4513 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4517 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4519 fprintf_unfiltered (gdb_stdlog
,
4520 "target_stopped_data_address ([%s]) = %ld\n",
4521 core_addr_to_string (*addr
),
4522 (unsigned long)retval
);
4527 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4529 CORE_ADDR start
, int length
)
4533 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4536 fprintf_filtered (gdb_stdlog
,
4537 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4538 core_addr_to_string (addr
), core_addr_to_string (start
),
4544 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4545 struct gdbarch
*gdbarch
,
4546 struct bp_target_info
*bp_tgt
)
4550 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4553 fprintf_unfiltered (gdb_stdlog
,
4554 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4555 core_addr_to_string (bp_tgt
->placed_address
),
4556 (unsigned long) retval
);
4561 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4562 struct gdbarch
*gdbarch
,
4563 struct bp_target_info
*bp_tgt
)
4567 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4570 fprintf_unfiltered (gdb_stdlog
,
4571 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4572 core_addr_to_string (bp_tgt
->placed_address
),
4573 (unsigned long) retval
);
4578 debug_to_insert_watchpoint (struct target_ops
*self
,
4579 CORE_ADDR addr
, int len
, int type
,
4580 struct expression
*cond
)
4584 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4585 addr
, len
, type
, cond
);
4587 fprintf_unfiltered (gdb_stdlog
,
4588 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4589 core_addr_to_string (addr
), len
, type
,
4590 host_address_to_string (cond
), (unsigned long) retval
);
4595 debug_to_remove_watchpoint (struct target_ops
*self
,
4596 CORE_ADDR addr
, int len
, int type
,
4597 struct expression
*cond
)
4601 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4602 addr
, len
, type
, cond
);
4604 fprintf_unfiltered (gdb_stdlog
,
4605 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4606 core_addr_to_string (addr
), len
, type
,
4607 host_address_to_string (cond
), (unsigned long) retval
);
4612 debug_to_terminal_init (struct target_ops
*self
)
4614 debug_target
.to_terminal_init (&debug_target
);
4616 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4620 debug_to_terminal_inferior (struct target_ops
*self
)
4622 debug_target
.to_terminal_inferior (&debug_target
);
4624 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4628 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4630 debug_target
.to_terminal_ours_for_output (&debug_target
);
4632 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4636 debug_to_terminal_ours (struct target_ops
*self
)
4638 debug_target
.to_terminal_ours (&debug_target
);
4640 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4644 debug_to_terminal_save_ours (struct target_ops
*self
)
4646 debug_target
.to_terminal_save_ours (&debug_target
);
4648 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4652 debug_to_terminal_info (struct target_ops
*self
,
4653 const char *arg
, int from_tty
)
4655 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4657 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4662 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4664 debug_target
.to_load (&debug_target
, args
, from_tty
);
4666 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4670 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4672 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4674 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4675 ptid_get_pid (ptid
));
4679 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4683 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4685 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4692 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4696 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4698 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4705 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4709 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4711 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4718 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4722 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4724 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4731 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4735 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4737 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4744 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4748 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4750 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4757 debug_to_has_exited (struct target_ops
*self
,
4758 int pid
, int wait_status
, int *exit_status
)
4762 has_exited
= debug_target
.to_has_exited (&debug_target
,
4763 pid
, wait_status
, exit_status
);
4765 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4766 pid
, wait_status
, *exit_status
, has_exited
);
4772 debug_to_can_run (struct target_ops
*self
)
4776 retval
= debug_target
.to_can_run (&debug_target
);
4778 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4783 static struct gdbarch
*
4784 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4786 struct gdbarch
*retval
;
4788 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4790 fprintf_unfiltered (gdb_stdlog
,
4791 "target_thread_architecture (%s) = %s [%s]\n",
4792 target_pid_to_str (ptid
),
4793 host_address_to_string (retval
),
4794 gdbarch_bfd_arch_info (retval
)->printable_name
);
4799 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4801 debug_target
.to_stop (&debug_target
, ptid
);
4803 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4804 target_pid_to_str (ptid
));
4808 debug_to_rcmd (struct target_ops
*self
, char *command
,
4809 struct ui_file
*outbuf
)
4811 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4812 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4816 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4820 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4822 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4829 setup_target_debug (void)
4831 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4833 current_target
.to_open
= debug_to_open
;
4834 current_target
.to_post_attach
= debug_to_post_attach
;
4835 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4836 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4837 current_target
.to_files_info
= debug_to_files_info
;
4838 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4839 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4840 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4841 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4842 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4843 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4844 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4845 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4846 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4847 current_target
.to_watchpoint_addr_within_range
4848 = debug_to_watchpoint_addr_within_range
;
4849 current_target
.to_region_ok_for_hw_watchpoint
4850 = debug_to_region_ok_for_hw_watchpoint
;
4851 current_target
.to_can_accel_watchpoint_condition
4852 = debug_to_can_accel_watchpoint_condition
;
4853 current_target
.to_terminal_init
= debug_to_terminal_init
;
4854 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4855 current_target
.to_terminal_ours_for_output
4856 = debug_to_terminal_ours_for_output
;
4857 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4858 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4859 current_target
.to_terminal_info
= debug_to_terminal_info
;
4860 current_target
.to_load
= debug_to_load
;
4861 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4862 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4863 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4864 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4865 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4866 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4867 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4868 current_target
.to_has_exited
= debug_to_has_exited
;
4869 current_target
.to_can_run
= debug_to_can_run
;
4870 current_target
.to_stop
= debug_to_stop
;
4871 current_target
.to_rcmd
= debug_to_rcmd
;
4872 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4873 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
/* Help text for the "info target"/"info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4883 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4885 error (_("\"monitor\" command not supported by this target."));
4889 do_monitor_command (char *cmd
,
4892 target_rcmd (cmd
, gdb_stdtarg
);
4895 /* Print the name of each layers of our target stack. */
4898 maintenance_print_target_stack (char *cmd
, int from_tty
)
4900 struct target_ops
*t
;
4902 printf_filtered (_("The current target stack is:\n"));
4904 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4906 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;

/* "set target-async" callback: refuse to change the setting while an
   inferior is live, otherwise commit the user's value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Restore the user-visible copy before erroring out, so "show"
	 stays consistent with the effective value.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}

/* "show target-async" callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4940 /* Temporary copies of permission settings. */
4942 static int may_write_registers_1
= 1;
4943 static int may_write_memory_1
= 1;
4944 static int may_insert_breakpoints_1
= 1;
4945 static int may_insert_tracepoints_1
= 1;
4946 static int may_insert_fast_tracepoints_1
= 1;
4947 static int may_stop_1
= 1;
4949 /* Make the user-set values match the real values again. */
4952 update_target_permissions (void)
4954 may_write_registers_1
= may_write_registers
;
4955 may_write_memory_1
= may_write_memory
;
4956 may_insert_breakpoints_1
= may_insert_breakpoints
;
4957 may_insert_tracepoints_1
= may_insert_tracepoints
;
4958 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4959 may_stop_1
= may_stop
;
4962 /* The one function handles (most of) the permission flags in the same
4966 set_target_permissions (char *args
, int from_tty
,
4967 struct cmd_list_element
*c
)
4969 if (target_has_execution
)
4971 update_target_permissions ();
4972 error (_("Cannot change this setting while the inferior is running."));
4975 /* Make the real values match the user-changed values. */
4976 may_write_registers
= may_write_registers_1
;
4977 may_insert_breakpoints
= may_insert_breakpoints_1
;
4978 may_insert_tracepoints
= may_insert_tracepoints_1
;
4979 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4980 may_stop
= may_stop_1
;
4981 update_observer_mode ();
4984 /* Set memory write permission independently of observer mode. */
4987 set_write_memory_permission (char *args
, int from_tty
,
4988 struct cmd_list_element
*c
)
4990 /* Make the real values match the user-changed values. */
4991 may_write_memory
= may_write_memory_1
;
4992 update_observer_mode ();
4997 initialize_targets (void)
4999 init_dummy_target ();
5000 push_target (&dummy_target
);
5002 add_info ("target", target_info
, targ_desc
);
5003 add_info ("files", target_info
, targ_desc
);
5005 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5006 Set target debugging."), _("\
5007 Show target debugging."), _("\
5008 When non-zero, target debugging is enabled. Higher numbers are more\n\
5009 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5013 &setdebuglist
, &showdebuglist
);
5015 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5016 &trust_readonly
, _("\
5017 Set mode for reading from readonly sections."), _("\
5018 Show mode for reading from readonly sections."), _("\
5019 When this mode is on, memory reads from readonly sections (such as .text)\n\
5020 will be read from the object file instead of from the target. This will\n\
5021 result in significant performance improvement for remote targets."),
5023 show_trust_readonly
,
5024 &setlist
, &showlist
);
5026 add_com ("monitor", class_obscure
, do_monitor_command
,
5027 _("Send a command to the remote monitor (remote targets only)."));
5029 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5030 _("Print the name of each layer of the internal target stack."),
5031 &maintenanceprintlist
);
5033 add_setshow_boolean_cmd ("target-async", no_class
,
5034 &target_async_permitted_1
, _("\
5035 Set whether gdb controls the inferior in asynchronous mode."), _("\
5036 Show whether gdb controls the inferior in asynchronous mode."), _("\
5037 Tells gdb whether to control the inferior in asynchronous mode."),
5038 set_target_async_command
,
5039 show_target_async_command
,
5043 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5044 &may_write_registers_1
, _("\
5045 Set permission to write into registers."), _("\
5046 Show permission to write into registers."), _("\
5047 When this permission is on, GDB may write into the target's registers.\n\
5048 Otherwise, any sort of write attempt will result in an error."),
5049 set_target_permissions
, NULL
,
5050 &setlist
, &showlist
);
5052 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5053 &may_write_memory_1
, _("\
5054 Set permission to write into target memory."), _("\
5055 Show permission to write into target memory."), _("\
5056 When this permission is on, GDB may write into the target's memory.\n\
5057 Otherwise, any sort of write attempt will result in an error."),
5058 set_write_memory_permission
, NULL
,
5059 &setlist
, &showlist
);
5061 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5062 &may_insert_breakpoints_1
, _("\
5063 Set permission to insert breakpoints in the target."), _("\
5064 Show permission to insert breakpoints in the target."), _("\
5065 When this permission is on, GDB may insert breakpoints in the program.\n\
5066 Otherwise, any sort of insertion attempt will result in an error."),
5067 set_target_permissions
, NULL
,
5068 &setlist
, &showlist
);
5070 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5071 &may_insert_tracepoints_1
, _("\
5072 Set permission to insert tracepoints in the target."), _("\
5073 Show permission to insert tracepoints in the target."), _("\
5074 When this permission is on, GDB may insert tracepoints in the program.\n\
5075 Otherwise, any sort of insertion attempt will result in an error."),
5076 set_target_permissions
, NULL
,
5077 &setlist
, &showlist
);
5079 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5080 &may_insert_fast_tracepoints_1
, _("\
5081 Set permission to insert fast tracepoints in the target."), _("\
5082 Show permission to insert fast tracepoints in the target."), _("\
5083 When this permission is on, GDB may insert fast tracepoints.\n\
5084 Otherwise, any sort of insertion attempt will result in an error."),
5085 set_target_permissions
, NULL
,
5086 &setlist
, &showlist
);
5088 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5090 Set permission to interrupt or signal the target."), _("\
5091 Show permission to interrupt or signal the target."), _("\
5092 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5093 Otherwise, any attempt to interrupt or stop will be ignored."),
5094 set_target_permissions
, NULL
,
5095 &setlist
, &showlist
);