1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static void tcomplain (void) ATTRIBUTE_NORETURN
;
62 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
64 static int return_zero (void);
66 static int return_minus_one (void);
68 static void *return_null (void);
70 void target_ignore (void);
72 static void target_command (char *, int);
74 static struct target_ops
*find_default_run_target (char *);
76 static target_xfer_partial_ftype default_xfer_partial
;
78 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
81 static int find_default_can_async_p (struct target_ops
*ignore
);
83 static int find_default_is_async_p (struct target_ops
*ignore
);
85 #include "target-delegates.c"
87 static void init_dummy_target (void);
89 static struct target_ops debug_target
;
91 static void debug_to_open (char *, int);
93 static void debug_to_prepare_to_store (struct target_ops
*self
,
96 static void debug_to_files_info (struct target_ops
*);
98 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
99 struct bp_target_info
*);
101 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
102 struct bp_target_info
*);
104 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
107 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
109 struct bp_target_info
*);
111 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
113 struct bp_target_info
*);
115 static int debug_to_insert_watchpoint (struct target_ops
*self
,
117 struct expression
*);
119 static int debug_to_remove_watchpoint (struct target_ops
*self
,
121 struct expression
*);
123 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
125 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
126 CORE_ADDR
, CORE_ADDR
, int);
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
133 struct expression
*);
135 static void debug_to_terminal_init (struct target_ops
*self
);
137 static void debug_to_terminal_inferior (struct target_ops
*self
);
139 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
141 static void debug_to_terminal_save_ours (struct target_ops
*self
);
143 static void debug_to_terminal_ours (struct target_ops
*self
);
145 static void debug_to_load (struct target_ops
*self
, char *, int);
147 static int debug_to_can_run (struct target_ops
*self
);
149 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
154 struct target_ops
**target_structs
;
155 unsigned target_struct_size
;
156 unsigned target_struct_allocsize
;
157 #define DEFAULT_ALLOCSIZE 10
159 /* The initial current target, so that there is always a semi-valid
162 static struct target_ops dummy_target
;
164 /* Top of target stack. */
166 static struct target_ops
*target_stack
;
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
171 struct target_ops current_target
;
173 /* Command list for target. */
175 static struct cmd_list_element
*targetlist
= NULL
;
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
180 static int trust_readonly
= 0;
182 /* Nonzero if we should show true memory content including
183 memory breakpoint inserted by gdb. */
185 static int show_memory_breakpoints
= 0;
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertant disruption, such as in non-stop mode. */
191 int may_write_registers
= 1;
193 int may_write_memory
= 1;
195 int may_insert_breakpoints
= 1;
197 int may_insert_tracepoints
= 1;
199 int may_insert_fast_tracepoints
= 1;
203 /* Non-zero if we want to see trace of target level stuff. */
205 static unsigned int targetdebug
= 0;
207 show_targetdebug (struct ui_file
*file
, int from_tty
,
208 struct cmd_list_element
*c
, const char *value
)
210 fprintf_filtered (file
, _("Target debugging is %s.\n"), value
);
213 static void setup_target_debug (void);
215 /* The user just typed 'target' without the name of a target. */
218 target_command (char *arg
, int from_tty
)
220 fputs_filtered ("Argument required (target name). Try `help target'\n",
224 /* Default target_has_* methods for process_stratum targets. */
227 default_child_has_all_memory (struct target_ops
*ops
)
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid
, null_ptid
))
237 default_child_has_memory (struct target_ops
*ops
)
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid
, null_ptid
))
247 default_child_has_stack (struct target_ops
*ops
)
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid
, null_ptid
))
257 default_child_has_registers (struct target_ops
*ops
)
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid
, null_ptid
))
267 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
269 /* If there's no thread selected, then we can't make it run through
271 if (ptid_equal (the_ptid
, null_ptid
))
279 target_has_all_memory_1 (void)
281 struct target_ops
*t
;
283 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
284 if (t
->to_has_all_memory (t
))
291 target_has_memory_1 (void)
293 struct target_ops
*t
;
295 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
296 if (t
->to_has_memory (t
))
303 target_has_stack_1 (void)
305 struct target_ops
*t
;
307 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
308 if (t
->to_has_stack (t
))
315 target_has_registers_1 (void)
317 struct target_ops
*t
;
319 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
320 if (t
->to_has_registers (t
))
327 target_has_execution_1 (ptid_t the_ptid
)
329 struct target_ops
*t
;
331 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
332 if (t
->to_has_execution (t
, the_ptid
))
339 target_has_execution_current (void)
341 return target_has_execution_1 (inferior_ptid
);
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
348 complete_target_initialization (struct target_ops
*t
)
350 /* Provide default values for all "must have" methods. */
351 if (t
->to_xfer_partial
== NULL
)
352 t
->to_xfer_partial
= default_xfer_partial
;
354 if (t
->to_has_all_memory
== NULL
)
355 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
357 if (t
->to_has_memory
== NULL
)
358 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
360 if (t
->to_has_stack
== NULL
)
361 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
363 if (t
->to_has_registers
== NULL
)
364 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
366 if (t
->to_has_execution
== NULL
)
367 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
369 install_delegators (t
);
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
377 add_target_with_completer (struct target_ops
*t
,
378 completer_ftype
*completer
)
380 struct cmd_list_element
*c
;
382 complete_target_initialization (t
);
386 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
387 target_structs
= (struct target_ops
**) xmalloc
388 (target_struct_allocsize
* sizeof (*target_structs
));
390 if (target_struct_size
>= target_struct_allocsize
)
392 target_struct_allocsize
*= 2;
393 target_structs
= (struct target_ops
**)
394 xrealloc ((char *) target_structs
,
395 target_struct_allocsize
* sizeof (*target_structs
));
397 target_structs
[target_struct_size
++] = t
;
399 if (targetlist
== NULL
)
400 add_prefix_cmd ("target", class_run
, target_command
, _("\
401 Connect to a target machine or process.\n\
402 The first argument is the type or protocol of the target machine.\n\
403 Remaining arguments are interpreted by the target protocol. For more\n\
404 information on the arguments for a particular protocol, type\n\
405 `help target ' followed by the protocol name."),
406 &targetlist
, "target ", 0, &cmdlist
);
407 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
409 if (completer
!= NULL
)
410 set_cmd_completer (c
, completer
);
413 /* Add a possible target architecture to the list. */
416 add_target (struct target_ops
*t
)
418 add_target_with_completer (t
, NULL
);
424 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
426 struct cmd_list_element
*c
;
429 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
431 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
432 alt
= xstrprintf ("target %s", t
->to_shortname
);
433 deprecate_cmd (c
, alt
);
446 struct target_ops
*t
;
448 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
449 if (t
->to_kill
!= NULL
)
452 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
462 target_load (char *arg
, int from_tty
)
464 target_dcache_invalidate ();
465 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
469 target_create_inferior (char *exec_file
, char *args
,
470 char **env
, int from_tty
)
472 struct target_ops
*t
;
474 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
476 if (t
->to_create_inferior
!= NULL
)
478 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
480 fprintf_unfiltered (gdb_stdlog
,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file
, args
, from_tty
);
487 internal_error (__FILE__
, __LINE__
,
488 _("could not find a target to create inferior"));
492 target_terminal_inferior (void)
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution
)
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target
.to_terminal_inferior
) (¤t_target
);
507 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
508 struct target_ops
*t
)
510 errno
= EIO
; /* Can't read/write this location. */
511 return 0; /* No bytes handled. */
517 error (_("You can't do that when your target is `%s'"),
518 current_target
.to_shortname
);
524 error (_("You can't do that without a process to debug."));
528 default_terminal_info (struct target_ops
*self
, const char *args
, int from_tty
)
530 printf_unfiltered (_("No saved terminal information.\n"));
533 /* A default implementation for the to_get_ada_task_ptid target method.
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
540 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
542 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops
*self
)
548 if (!target_can_execute_reverse
)
550 else if (!target_can_async_p ())
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
570 update_current_target (void)
572 struct target_ops
*t
;
574 /* First, reset current's contents. */
575 memset (¤t_target
, 0, sizeof (current_target
));
577 /* Install the delegators. */
578 install_delegators (¤t_target
);
580 #define INHERIT(FIELD, TARGET) \
581 if (!current_target.FIELD) \
582 current_target.FIELD = (TARGET)->FIELD
584 for (t
= target_stack
; t
; t
= t
->beneath
)
586 INHERIT (to_shortname
, t
);
587 INHERIT (to_longname
, t
);
589 /* Do not inherit to_open. */
590 /* Do not inherit to_close. */
591 /* Do not inherit to_attach. */
592 /* Do not inherit to_post_attach. */
593 INHERIT (to_attach_no_wait
, t
);
594 /* Do not inherit to_detach. */
595 /* Do not inherit to_disconnect. */
596 /* Do not inherit to_resume. */
597 /* Do not inherit to_wait. */
598 /* Do not inherit to_fetch_registers. */
599 /* Do not inherit to_store_registers. */
600 /* Do not inherit to_prepare_to_store. */
601 INHERIT (deprecated_xfer_memory
, t
);
602 /* Do not inherit to_files_info. */
603 /* Do not inherit to_insert_breakpoint. */
604 /* Do not inherit to_remove_breakpoint. */
605 /* Do not inherit to_can_use_hw_breakpoint. */
606 /* Do not inherit to_insert_hw_breakpoint. */
607 /* Do not inherit to_remove_hw_breakpoint. */
608 /* Do not inherit to_ranged_break_num_registers. */
609 /* Do not inherit to_insert_watchpoint. */
610 /* Do not inherit to_remove_watchpoint. */
611 /* Do not inherit to_insert_mask_watchpoint. */
612 /* Do not inherit to_remove_mask_watchpoint. */
613 /* Do not inherit to_stopped_data_address. */
614 INHERIT (to_have_steppable_watchpoint
, t
);
615 INHERIT (to_have_continuable_watchpoint
, t
);
616 /* Do not inherit to_stopped_by_watchpoint. */
617 /* Do not inherit to_watchpoint_addr_within_range. */
618 /* Do not inherit to_region_ok_for_hw_watchpoint. */
619 /* Do not inherit to_can_accel_watchpoint_condition. */
620 /* Do not inherit to_masked_watch_num_registers. */
621 /* Do not inherit to_terminal_init. */
622 /* Do not inherit to_terminal_inferior. */
623 /* Do not inherit to_terminal_ours_for_output. */
624 /* Do not inherit to_terminal_ours. */
625 /* Do not inherit to_terminal_save_ours. */
626 /* Do not inherit to_terminal_info. */
627 /* Do not inherit to_kill. */
628 /* Do not inherit to_load. */
629 /* Do no inherit to_create_inferior. */
630 /* Do not inherit to_post_startup_inferior. */
631 /* Do not inherit to_insert_fork_catchpoint. */
632 /* Do not inherit to_remove_fork_catchpoint. */
633 /* Do not inherit to_insert_vfork_catchpoint. */
634 /* Do not inherit to_remove_vfork_catchpoint. */
635 /* Do not inherit to_follow_fork. */
636 /* Do not inherit to_insert_exec_catchpoint. */
637 /* Do not inherit to_remove_exec_catchpoint. */
638 /* Do not inherit to_set_syscall_catchpoint. */
639 /* Do not inherit to_has_exited. */
640 /* Do not inherit to_mourn_inferior. */
641 INHERIT (to_can_run
, t
);
642 /* Do not inherit to_pass_signals. */
643 /* Do not inherit to_program_signals. */
644 /* Do not inherit to_thread_alive. */
645 /* Do not inherit to_find_new_threads. */
646 /* Do not inherit to_pid_to_str. */
647 /* Do not inherit to_extra_thread_info. */
648 /* Do not inherit to_thread_name. */
649 INHERIT (to_stop
, t
);
650 /* Do not inherit to_xfer_partial. */
651 /* Do not inherit to_rcmd. */
652 INHERIT (to_pid_to_exec_file
, t
);
653 INHERIT (to_log_command
, t
);
654 INHERIT (to_stratum
, t
);
655 /* Do not inherit to_has_all_memory. */
656 /* Do not inherit to_has_memory. */
657 /* Do not inherit to_has_stack. */
658 /* Do not inherit to_has_registers. */
659 /* Do not inherit to_has_execution. */
660 INHERIT (to_has_thread_control
, t
);
661 /* Do not inherit to_can_async_p. */
662 /* Do not inherit to_is_async_p. */
663 /* Do not inherit to_async. */
664 INHERIT (to_find_memory_regions
, t
);
665 INHERIT (to_make_corefile_notes
, t
);
666 INHERIT (to_get_bookmark
, t
);
667 INHERIT (to_goto_bookmark
, t
);
668 /* Do not inherit to_get_thread_local_address. */
669 INHERIT (to_can_execute_reverse
, t
);
670 INHERIT (to_execution_direction
, t
);
671 INHERIT (to_thread_architecture
, t
);
672 /* Do not inherit to_read_description. */
673 INHERIT (to_get_ada_task_ptid
, t
);
674 /* Do not inherit to_search_memory. */
675 INHERIT (to_supports_multi_process
, t
);
676 INHERIT (to_supports_enable_disable_tracepoint
, t
);
677 INHERIT (to_supports_string_tracing
, t
);
678 INHERIT (to_trace_init
, t
);
679 INHERIT (to_download_tracepoint
, t
);
680 INHERIT (to_can_download_tracepoint
, t
);
681 INHERIT (to_download_trace_state_variable
, t
);
682 INHERIT (to_enable_tracepoint
, t
);
683 INHERIT (to_disable_tracepoint
, t
);
684 INHERIT (to_trace_set_readonly_regions
, t
);
685 INHERIT (to_trace_start
, t
);
686 INHERIT (to_get_trace_status
, t
);
687 INHERIT (to_get_tracepoint_status
, t
);
688 INHERIT (to_trace_stop
, t
);
689 INHERIT (to_trace_find
, t
);
690 INHERIT (to_get_trace_state_variable_value
, t
);
691 INHERIT (to_save_trace_data
, t
);
692 INHERIT (to_upload_tracepoints
, t
);
693 INHERIT (to_upload_trace_state_variables
, t
);
694 INHERIT (to_get_raw_trace_data
, t
);
695 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
696 INHERIT (to_set_disconnected_tracing
, t
);
697 INHERIT (to_set_circular_trace_buffer
, t
);
698 INHERIT (to_set_trace_buffer_size
, t
);
699 INHERIT (to_set_trace_notes
, t
);
700 INHERIT (to_get_tib_address
, t
);
701 INHERIT (to_set_permissions
, t
);
702 INHERIT (to_static_tracepoint_marker_at
, t
);
703 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
704 INHERIT (to_traceframe_info
, t
);
705 INHERIT (to_use_agent
, t
);
706 INHERIT (to_can_use_agent
, t
);
707 INHERIT (to_augmented_libraries_svr4_read
, t
);
708 INHERIT (to_magic
, t
);
709 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
710 INHERIT (to_can_run_breakpoint_commands
, t
);
711 /* Do not inherit to_memory_map. */
712 /* Do not inherit to_flash_erase. */
713 /* Do not inherit to_flash_done. */
717 /* Clean up a target struct so it no longer has any zero pointers in
718 it. Some entries are defaulted to a method that print an error,
719 others are hard-wired to a standard recursive default. */
721 #define de_fault(field, value) \
722 if (!current_target.field) \
723 current_target.field = value
726 (void (*) (char *, int))
729 (void (*) (struct target_ops
*))
731 de_fault (deprecated_xfer_memory
,
732 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
733 struct mem_attrib
*, struct target_ops
*))
735 de_fault (to_can_run
,
736 (int (*) (struct target_ops
*))
739 (void (*) (struct target_ops
*, ptid_t
))
741 de_fault (to_pid_to_exec_file
,
742 (char *(*) (struct target_ops
*, int))
744 de_fault (to_thread_architecture
,
745 default_thread_architecture
);
746 current_target
.to_read_description
= NULL
;
747 de_fault (to_get_ada_task_ptid
,
748 (ptid_t (*) (struct target_ops
*, long, long))
749 default_get_ada_task_ptid
);
750 de_fault (to_supports_multi_process
,
751 (int (*) (struct target_ops
*))
753 de_fault (to_supports_enable_disable_tracepoint
,
754 (int (*) (struct target_ops
*))
756 de_fault (to_supports_string_tracing
,
757 (int (*) (struct target_ops
*))
759 de_fault (to_trace_init
,
760 (void (*) (struct target_ops
*))
762 de_fault (to_download_tracepoint
,
763 (void (*) (struct target_ops
*, struct bp_location
*))
765 de_fault (to_can_download_tracepoint
,
766 (int (*) (struct target_ops
*))
768 de_fault (to_download_trace_state_variable
,
769 (void (*) (struct target_ops
*, struct trace_state_variable
*))
771 de_fault (to_enable_tracepoint
,
772 (void (*) (struct target_ops
*, struct bp_location
*))
774 de_fault (to_disable_tracepoint
,
775 (void (*) (struct target_ops
*, struct bp_location
*))
777 de_fault (to_trace_set_readonly_regions
,
778 (void (*) (struct target_ops
*))
780 de_fault (to_trace_start
,
781 (void (*) (struct target_ops
*))
783 de_fault (to_get_trace_status
,
784 (int (*) (struct target_ops
*, struct trace_status
*))
786 de_fault (to_get_tracepoint_status
,
787 (void (*) (struct target_ops
*, struct breakpoint
*,
788 struct uploaded_tp
*))
790 de_fault (to_trace_stop
,
791 (void (*) (struct target_ops
*))
793 de_fault (to_trace_find
,
794 (int (*) (struct target_ops
*,
795 enum trace_find_type
, int, CORE_ADDR
, CORE_ADDR
, int *))
797 de_fault (to_get_trace_state_variable_value
,
798 (int (*) (struct target_ops
*, int, LONGEST
*))
800 de_fault (to_save_trace_data
,
801 (int (*) (struct target_ops
*, const char *))
803 de_fault (to_upload_tracepoints
,
804 (int (*) (struct target_ops
*, struct uploaded_tp
**))
806 de_fault (to_upload_trace_state_variables
,
807 (int (*) (struct target_ops
*, struct uploaded_tsv
**))
809 de_fault (to_get_raw_trace_data
,
810 (LONGEST (*) (struct target_ops
*, gdb_byte
*, ULONGEST
, LONGEST
))
812 de_fault (to_get_min_fast_tracepoint_insn_len
,
813 (int (*) (struct target_ops
*))
815 de_fault (to_set_disconnected_tracing
,
816 (void (*) (struct target_ops
*, int))
818 de_fault (to_set_circular_trace_buffer
,
819 (void (*) (struct target_ops
*, int))
821 de_fault (to_set_trace_buffer_size
,
822 (void (*) (struct target_ops
*, LONGEST
))
824 de_fault (to_set_trace_notes
,
825 (int (*) (struct target_ops
*,
826 const char *, const char *, const char *))
828 de_fault (to_get_tib_address
,
829 (int (*) (struct target_ops
*, ptid_t
, CORE_ADDR
*))
831 de_fault (to_set_permissions
,
832 (void (*) (struct target_ops
*))
834 de_fault (to_static_tracepoint_marker_at
,
835 (int (*) (struct target_ops
*,
836 CORE_ADDR
, struct static_tracepoint_marker
*))
838 de_fault (to_static_tracepoint_markers_by_strid
,
839 (VEC(static_tracepoint_marker_p
) * (*) (struct target_ops
*,
842 de_fault (to_traceframe_info
,
843 (struct traceframe_info
* (*) (struct target_ops
*))
845 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
846 (int (*) (struct target_ops
*))
848 de_fault (to_can_run_breakpoint_commands
,
849 (int (*) (struct target_ops
*))
851 de_fault (to_use_agent
,
852 (int (*) (struct target_ops
*, int))
854 de_fault (to_can_use_agent
,
855 (int (*) (struct target_ops
*))
857 de_fault (to_augmented_libraries_svr4_read
,
858 (int (*) (struct target_ops
*))
860 de_fault (to_execution_direction
, default_execution_direction
);
864 /* Finally, position the target-stack beneath the squashed
865 "current_target". That way code looking for a non-inherited
866 target method can quickly and simply find it. */
867 current_target
.beneath
= target_stack
;
870 setup_target_debug ();
873 /* Push a new target type into the stack of the existing target accessors,
874 possibly superseding some of the existing accessors.
876 Rather than allow an empty stack, we always have the dummy target at
877 the bottom stratum, so we can call the function vectors without
881 push_target (struct target_ops
*t
)
883 struct target_ops
**cur
;
885 /* Check magic number. If wrong, it probably means someone changed
886 the struct definition, but not all the places that initialize one. */
887 if (t
->to_magic
!= OPS_MAGIC
)
889 fprintf_unfiltered (gdb_stderr
,
890 "Magic number of %s target struct wrong\n",
892 internal_error (__FILE__
, __LINE__
,
893 _("failed internal consistency check"));
896 /* Find the proper stratum to install this target in. */
897 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
899 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
903 /* If there's already targets at this stratum, remove them. */
904 /* FIXME: cagney/2003-10-15: I think this should be popping all
905 targets to CUR, and not just those at this stratum level. */
906 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
908 /* There's already something at this stratum level. Close it,
909 and un-hook it from the stack. */
910 struct target_ops
*tmp
= (*cur
);
912 (*cur
) = (*cur
)->beneath
;
917 /* We have removed all targets in our stratum, now add the new one. */
921 update_current_target ();
924 /* Remove a target_ops vector from the stack, wherever it may be.
925 Return how many times it was removed (0 or 1). */
928 unpush_target (struct target_ops
*t
)
930 struct target_ops
**cur
;
931 struct target_ops
*tmp
;
933 if (t
->to_stratum
== dummy_stratum
)
934 internal_error (__FILE__
, __LINE__
,
935 _("Attempt to unpush the dummy target"));
937 /* Look for the specified target. Note that we assume that a target
938 can only occur once in the target stack. */
940 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
946 /* If we don't find target_ops, quit. Only open targets should be
951 /* Unchain the target. */
953 (*cur
) = (*cur
)->beneath
;
956 update_current_target ();
958 /* Finally close the target. Note we do this after unchaining, so
959 any target method calls from within the target_close
960 implementation don't end up in T anymore. */
967 pop_all_targets_above (enum strata above_stratum
)
969 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
971 if (!unpush_target (target_stack
))
973 fprintf_unfiltered (gdb_stderr
,
974 "pop_all_targets couldn't find target %s\n",
975 target_stack
->to_shortname
);
976 internal_error (__FILE__
, __LINE__
,
977 _("failed internal consistency check"));
984 pop_all_targets (void)
986 pop_all_targets_above (dummy_stratum
);
989 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
992 target_is_pushed (struct target_ops
*t
)
994 struct target_ops
**cur
;
996 /* Check magic number. If wrong, it probably means someone changed
997 the struct definition, but not all the places that initialize one. */
998 if (t
->to_magic
!= OPS_MAGIC
)
1000 fprintf_unfiltered (gdb_stderr
,
1001 "Magic number of %s target struct wrong\n",
1003 internal_error (__FILE__
, __LINE__
,
1004 _("failed internal consistency check"));
1007 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1014 /* Using the objfile specified in OBJFILE, find the address for the
1015 current thread's thread-local storage with offset OFFSET. */
1017 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1019 volatile CORE_ADDR addr
= 0;
1020 struct target_ops
*target
;
1022 for (target
= current_target
.beneath
;
1024 target
= target
->beneath
)
1026 if (target
->to_get_thread_local_address
!= NULL
)
1031 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1033 ptid_t ptid
= inferior_ptid
;
1034 volatile struct gdb_exception ex
;
1036 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1040 /* Fetch the load module address for this objfile. */
1041 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1043 /* If it's 0, throw the appropriate exception. */
1045 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1046 _("TLS load module not found"));
1048 addr
= target
->to_get_thread_local_address (target
, ptid
,
1051 /* If an error occurred, print TLS related messages here. Otherwise,
1052 throw the error to some higher catcher. */
1055 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1059 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1060 error (_("Cannot find thread-local variables "
1061 "in this thread library."));
1063 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1064 if (objfile_is_library
)
1065 error (_("Cannot find shared library `%s' in dynamic"
1066 " linker's load module list"), objfile_name (objfile
));
1068 error (_("Cannot find executable file `%s' in dynamic"
1069 " linker's load module list"), objfile_name (objfile
));
1071 case TLS_NOT_ALLOCATED_YET_ERROR
:
1072 if (objfile_is_library
)
1073 error (_("The inferior has not yet allocated storage for"
1074 " thread-local variables in\n"
1075 "the shared library `%s'\n"
1077 objfile_name (objfile
), target_pid_to_str (ptid
));
1079 error (_("The inferior has not yet allocated storage for"
1080 " thread-local variables in\n"
1081 "the executable `%s'\n"
1083 objfile_name (objfile
), target_pid_to_str (ptid
));
1085 case TLS_GENERIC_ERROR
:
1086 if (objfile_is_library
)
1087 error (_("Cannot find thread-local storage for %s, "
1088 "shared library %s:\n%s"),
1089 target_pid_to_str (ptid
),
1090 objfile_name (objfile
), ex
.message
);
1092 error (_("Cannot find thread-local storage for %s, "
1093 "executable file %s:\n%s"),
1094 target_pid_to_str (ptid
),
1095 objfile_name (objfile
), ex
.message
);
1098 throw_exception (ex
);
1103 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1104 TLS is an ABI-specific thing. But we don't do that yet. */
1106 error (_("Cannot find thread-local variables on this target"));
1112 target_xfer_status_to_string (enum target_xfer_status err
)
1114 #define CASE(X) case X: return #X
1117 CASE(TARGET_XFER_E_IO
);
1118 CASE(TARGET_XFER_E_UNAVAILABLE
);
1127 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1129 /* target_read_string -- read a null terminated string, up to LEN bytes,
1130 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1131 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1132 is responsible for freeing it. Return the number of bytes successfully
1136 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1138 int tlen
, offset
, i
;
1142 int buffer_allocated
;
1144 unsigned int nbytes_read
= 0;
1146 gdb_assert (string
);
1148 /* Small for testing. */
1149 buffer_allocated
= 4;
1150 buffer
= xmalloc (buffer_allocated
);
1155 tlen
= MIN (len
, 4 - (memaddr
& 3));
1156 offset
= memaddr
& 3;
1158 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1161 /* The transfer request might have crossed the boundary to an
1162 unallocated region of memory. Retry the transfer, requesting
1166 errcode
= target_read_memory (memaddr
, buf
, 1);
1171 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1175 bytes
= bufptr
- buffer
;
1176 buffer_allocated
*= 2;
1177 buffer
= xrealloc (buffer
, buffer_allocated
);
1178 bufptr
= buffer
+ bytes
;
1181 for (i
= 0; i
< tlen
; i
++)
1183 *bufptr
++ = buf
[i
+ offset
];
1184 if (buf
[i
+ offset
] == '\000')
1186 nbytes_read
+= i
+ 1;
1193 nbytes_read
+= tlen
;
1202 struct target_section_table
*
1203 target_get_section_table (struct target_ops
*target
)
1205 struct target_ops
*t
;
1208 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1210 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1211 if (t
->to_get_section_table
!= NULL
)
1212 return (*t
->to_get_section_table
) (t
);
1217 /* Find a section containing ADDR. */
1219 struct target_section
*
1220 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1222 struct target_section_table
*table
= target_get_section_table (target
);
1223 struct target_section
*secp
;
1228 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1230 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1236 /* Read memory from the live target, even if currently inspecting a
1237 traceframe. The return is the same as that of target_read. */
1239 static enum target_xfer_status
1240 target_read_live_memory (enum target_object object
,
1241 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1242 ULONGEST
*xfered_len
)
1244 enum target_xfer_status ret
;
1245 struct cleanup
*cleanup
;
1247 /* Switch momentarily out of tfind mode so to access live memory.
1248 Note that this must not clear global state, such as the frame
1249 cache, which must still remain valid for the previous traceframe.
1250 We may be _building_ the frame cache at this point. */
1251 cleanup
= make_cleanup_restore_traceframe_number ();
1252 set_traceframe_number (-1);
1254 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1255 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1257 do_cleanups (cleanup
);
1261 /* Using the set of read-only target sections of OPS, read live
1262 read-only memory. Note that the actual reads start from the
1263 top-most target again.
1265 For interface/parameters/return description see target.h,
1268 static enum target_xfer_status
1269 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1270 enum target_object object
,
1271 gdb_byte
*readbuf
, ULONGEST memaddr
,
1272 ULONGEST len
, ULONGEST
*xfered_len
)
1274 struct target_section
*secp
;
1275 struct target_section_table
*table
;
1277 secp
= target_section_by_addr (ops
, memaddr
);
1279 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1280 secp
->the_bfd_section
)
1283 struct target_section
*p
;
1284 ULONGEST memend
= memaddr
+ len
;
1286 table
= target_get_section_table (ops
);
1288 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1290 if (memaddr
>= p
->addr
)
1292 if (memend
<= p
->endaddr
)
1294 /* Entire transfer is within this section. */
1295 return target_read_live_memory (object
, memaddr
,
1296 readbuf
, len
, xfered_len
);
1298 else if (memaddr
>= p
->endaddr
)
1300 /* This section ends before the transfer starts. */
1305 /* This section overlaps the transfer. Just do half. */
1306 len
= p
->endaddr
- memaddr
;
1307 return target_read_live_memory (object
, memaddr
,
1308 readbuf
, len
, xfered_len
);
1314 return TARGET_XFER_EOF
;
1317 /* Read memory from more than one valid target. A core file, for
1318 instance, could have some of memory but delegate other bits to
1319 the target below it. So, we must manually try all targets. */
1321 static enum target_xfer_status
1322 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1323 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1324 ULONGEST
*xfered_len
)
1326 enum target_xfer_status res
;
1330 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1331 readbuf
, writebuf
, memaddr
, len
,
1333 if (res
== TARGET_XFER_OK
)
1336 /* Stop if the target reports that the memory is not available. */
1337 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1340 /* We want to continue past core files to executables, but not
1341 past a running target's memory. */
1342 if (ops
->to_has_all_memory (ops
))
1347 while (ops
!= NULL
);
1352 /* Perform a partial memory transfer.
1353 For docs see target.h, to_xfer_partial. */
1355 static enum target_xfer_status
1356 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1357 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1358 ULONGEST len
, ULONGEST
*xfered_len
)
1360 enum target_xfer_status res
;
1362 struct mem_region
*region
;
1363 struct inferior
*inf
;
1365 /* For accesses to unmapped overlay sections, read directly from
1366 files. Must do this first, as MEMADDR may need adjustment. */
1367 if (readbuf
!= NULL
&& overlay_debugging
)
1369 struct obj_section
*section
= find_pc_overlay (memaddr
);
1371 if (pc_in_unmapped_range (memaddr
, section
))
1373 struct target_section_table
*table
1374 = target_get_section_table (ops
);
1375 const char *section_name
= section
->the_bfd_section
->name
;
1377 memaddr
= overlay_mapped_address (memaddr
, section
);
1378 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1379 memaddr
, len
, xfered_len
,
1381 table
->sections_end
,
1386 /* Try the executable files, if "trust-readonly-sections" is set. */
1387 if (readbuf
!= NULL
&& trust_readonly
)
1389 struct target_section
*secp
;
1390 struct target_section_table
*table
;
1392 secp
= target_section_by_addr (ops
, memaddr
);
1394 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1395 secp
->the_bfd_section
)
1398 table
= target_get_section_table (ops
);
1399 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1400 memaddr
, len
, xfered_len
,
1402 table
->sections_end
,
1407 /* If reading unavailable memory in the context of traceframes, and
1408 this address falls within a read-only section, fallback to
1409 reading from live memory. */
1410 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1412 VEC(mem_range_s
) *available
;
1414 /* If we fail to get the set of available memory, then the
1415 target does not support querying traceframe info, and so we
1416 attempt reading from the traceframe anyway (assuming the
1417 target implements the old QTro packet then). */
1418 if (traceframe_available_memory (&available
, memaddr
, len
))
1420 struct cleanup
*old_chain
;
1422 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1424 if (VEC_empty (mem_range_s
, available
)
1425 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1427 /* Don't read into the traceframe's available
1429 if (!VEC_empty (mem_range_s
, available
))
1431 LONGEST oldlen
= len
;
1433 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1434 gdb_assert (len
<= oldlen
);
1437 do_cleanups (old_chain
);
1439 /* This goes through the topmost target again. */
1440 res
= memory_xfer_live_readonly_partial (ops
, object
,
1443 if (res
== TARGET_XFER_OK
)
1444 return TARGET_XFER_OK
;
1447 /* No use trying further, we know some memory starting
1448 at MEMADDR isn't available. */
1450 return TARGET_XFER_E_UNAVAILABLE
;
1454 /* Don't try to read more than how much is available, in
1455 case the target implements the deprecated QTro packet to
1456 cater for older GDBs (the target's knowledge of read-only
1457 sections may be outdated by now). */
1458 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1460 do_cleanups (old_chain
);
1464 /* Try GDB's internal data cache. */
1465 region
= lookup_mem_region (memaddr
);
1466 /* region->hi == 0 means there's no upper bound. */
1467 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1470 reg_len
= region
->hi
- memaddr
;
1472 switch (region
->attrib
.mode
)
1475 if (writebuf
!= NULL
)
1476 return TARGET_XFER_E_IO
;
1480 if (readbuf
!= NULL
)
1481 return TARGET_XFER_E_IO
;
1485 /* We only support writing to flash during "load" for now. */
1486 if (writebuf
!= NULL
)
1487 error (_("Writing to flash memory forbidden in this context"));
1491 return TARGET_XFER_E_IO
;
1494 if (!ptid_equal (inferior_ptid
, null_ptid
))
1495 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1500 /* The dcache reads whole cache lines; that doesn't play well
1501 with reading from a trace buffer, because reading outside of
1502 the collected memory range fails. */
1503 && get_traceframe_number () == -1
1504 && (region
->attrib
.cache
1505 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1506 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1508 DCACHE
*dcache
= target_dcache_get_or_init ();
1511 if (readbuf
!= NULL
)
1512 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1514 /* FIXME drow/2006-08-09: If we're going to preserve const
1515 correctness dcache_xfer_memory should take readbuf and
1517 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1520 return TARGET_XFER_E_IO
;
1523 *xfered_len
= (ULONGEST
) l
;
1524 return TARGET_XFER_OK
;
1528 /* If none of those methods found the memory we wanted, fall back
1529 to a target partial transfer. Normally a single call to
1530 to_xfer_partial is enough; if it doesn't recognize an object
1531 it will call the to_xfer_partial of the next target down.
1532 But for memory this won't do. Memory is the only target
1533 object which can be read from more than one valid target.
1534 A core file, for instance, could have some of memory but
1535 delegate other bits to the target below it. So, we must
1536 manually try all targets. */
1538 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1541 /* Make sure the cache gets updated no matter what - if we are writing
1542 to the stack. Even if this write is not tagged as such, we still need
1543 to update the cache. */
1545 if (res
== TARGET_XFER_OK
1548 && target_dcache_init_p ()
1549 && !region
->attrib
.cache
1550 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1551 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1553 DCACHE
*dcache
= target_dcache_get ();
1555 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1558 /* If we still haven't got anything, return the last error. We
1563 /* Perform a partial memory transfer. For docs see target.h,
1566 static enum target_xfer_status
1567 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1568 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1569 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1571 enum target_xfer_status res
;
1573 /* Zero length requests are ok and require no work. */
1575 return TARGET_XFER_EOF
;
1577 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1578 breakpoint insns, thus hiding out from higher layers whether
1579 there are software breakpoints inserted in the code stream. */
1580 if (readbuf
!= NULL
)
1582 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1585 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1586 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1591 struct cleanup
*old_chain
;
1593 /* A large write request is likely to be partially satisfied
1594 by memory_xfer_partial_1. We will continually malloc
1595 and free a copy of the entire write request for breakpoint
1596 shadow handling even though we only end up writing a small
1597 subset of it. Cap writes to 4KB to mitigate this. */
1598 len
= min (4096, len
);
1600 buf
= xmalloc (len
);
1601 old_chain
= make_cleanup (xfree
, buf
);
1602 memcpy (buf
, writebuf
, len
);
1604 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1605 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1608 do_cleanups (old_chain
);
1615 restore_show_memory_breakpoints (void *arg
)
1617 show_memory_breakpoints
= (uintptr_t) arg
;
1621 make_show_memory_breakpoints_cleanup (int show
)
1623 int current
= show_memory_breakpoints
;
1625 show_memory_breakpoints
= show
;
1626 return make_cleanup (restore_show_memory_breakpoints
,
1627 (void *) (uintptr_t) current
);
1630 /* For docs see target.h, to_xfer_partial. */
1632 enum target_xfer_status
1633 target_xfer_partial (struct target_ops
*ops
,
1634 enum target_object object
, const char *annex
,
1635 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1636 ULONGEST offset
, ULONGEST len
,
1637 ULONGEST
*xfered_len
)
1639 enum target_xfer_status retval
;
1641 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1643 /* Transfer is done when LEN is zero. */
1645 return TARGET_XFER_EOF
;
1647 if (writebuf
&& !may_write_memory
)
1648 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1649 core_addr_to_string_nz (offset
), plongest (len
));
1653 /* If this is a memory transfer, let the memory-specific code
1654 have a look at it instead. Memory transfers are more
1656 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1657 || object
== TARGET_OBJECT_CODE_MEMORY
)
1658 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1659 writebuf
, offset
, len
, xfered_len
);
1660 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1662 /* Request the normal memory object from other layers. */
1663 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1667 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1668 writebuf
, offset
, len
, xfered_len
);
1672 const unsigned char *myaddr
= NULL
;
1674 fprintf_unfiltered (gdb_stdlog
,
1675 "%s:target_xfer_partial "
1676 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1679 (annex
? annex
: "(null)"),
1680 host_address_to_string (readbuf
),
1681 host_address_to_string (writebuf
),
1682 core_addr_to_string_nz (offset
),
1683 pulongest (len
), retval
,
1684 pulongest (*xfered_len
));
1690 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1694 fputs_unfiltered (", bytes =", gdb_stdlog
);
1695 for (i
= 0; i
< *xfered_len
; i
++)
1697 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1699 if (targetdebug
< 2 && i
> 0)
1701 fprintf_unfiltered (gdb_stdlog
, " ...");
1704 fprintf_unfiltered (gdb_stdlog
, "\n");
1707 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1711 fputc_unfiltered ('\n', gdb_stdlog
);
1714 /* Check implementations of to_xfer_partial update *XFERED_LEN
1715 properly. Do assertion after printing debug messages, so that we
1716 can find more clues on assertion failure from debugging messages. */
1717 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1718 gdb_assert (*xfered_len
> 0);
1723 /* Read LEN bytes of target memory at address MEMADDR, placing the
1724 results in GDB's memory at MYADDR. Returns either 0 for success or
1725 TARGET_XFER_E_IO if any error occurs.
1727 If an error occurs, no guarantee is made about the contents of the data at
1728 MYADDR. In particular, the caller should not depend upon partial reads
1729 filling the buffer with good data. There is no way for the caller to know
1730 how much good data might have been transfered anyway. Callers that can
1731 deal with partial reads should call target_read (which will retry until
1732 it makes no progress, and then return how much was transferred). */
1735 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1737 /* Dispatch to the topmost target, not the flattened current_target.
1738 Memory accesses check target->to_has_(all_)memory, and the
1739 flattened target doesn't inherit those. */
1740 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1741 myaddr
, memaddr
, len
) == len
)
1744 return TARGET_XFER_E_IO
;
1747 /* Like target_read_memory, but specify explicitly that this is a read
1748 from the target's raw memory. That is, this read bypasses the
1749 dcache, breakpoint shadowing, etc. */
1752 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1754 /* See comment in target_read_memory about why the request starts at
1755 current_target.beneath. */
1756 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1757 myaddr
, memaddr
, len
) == len
)
1760 return TARGET_XFER_E_IO
;
1763 /* Like target_read_memory, but specify explicitly that this is a read from
1764 the target's stack. This may trigger different cache behavior. */
1767 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1769 /* See comment in target_read_memory about why the request starts at
1770 current_target.beneath. */
1771 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1772 myaddr
, memaddr
, len
) == len
)
1775 return TARGET_XFER_E_IO
;
1778 /* Like target_read_memory, but specify explicitly that this is a read from
1779 the target's code. This may trigger different cache behavior. */
1782 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1784 /* See comment in target_read_memory about why the request starts at
1785 current_target.beneath. */
1786 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1787 myaddr
, memaddr
, len
) == len
)
1790 return TARGET_XFER_E_IO
;
1793 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1794 Returns either 0 for success or TARGET_XFER_E_IO if any
1795 error occurs. If an error occurs, no guarantee is made about how
1796 much data got written. Callers that can deal with partial writes
1797 should call target_write. */
1800 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1802 /* See comment in target_read_memory about why the request starts at
1803 current_target.beneath. */
1804 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1805 myaddr
, memaddr
, len
) == len
)
1808 return TARGET_XFER_E_IO
;
1811 /* Write LEN bytes from MYADDR to target raw memory at address
1812 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1813 if any error occurs. If an error occurs, no guarantee is made
1814 about how much data got written. Callers that can deal with
1815 partial writes should call target_write. */
1818 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1820 /* See comment in target_read_memory about why the request starts at
1821 current_target.beneath. */
1822 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1823 myaddr
, memaddr
, len
) == len
)
1826 return TARGET_XFER_E_IO
;
1829 /* Fetch the target's memory map. */
1832 target_memory_map (void)
1834 VEC(mem_region_s
) *result
;
1835 struct mem_region
*last_one
, *this_one
;
1837 struct target_ops
*t
;
1840 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1842 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1843 if (t
->to_memory_map
!= NULL
)
1849 result
= t
->to_memory_map (t
);
1853 qsort (VEC_address (mem_region_s
, result
),
1854 VEC_length (mem_region_s
, result
),
1855 sizeof (struct mem_region
), mem_region_cmp
);
1857 /* Check that regions do not overlap. Simultaneously assign
1858 a numbering for the "mem" commands to use to refer to
1861 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1863 this_one
->number
= ix
;
1865 if (last_one
&& last_one
->hi
> this_one
->lo
)
1867 warning (_("Overlapping regions in memory map: ignoring"));
1868 VEC_free (mem_region_s
, result
);
1871 last_one
= this_one
;
1878 target_flash_erase (ULONGEST address
, LONGEST length
)
1880 struct target_ops
*t
;
1882 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1883 if (t
->to_flash_erase
!= NULL
)
1886 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1887 hex_string (address
), phex (length
, 0));
1888 t
->to_flash_erase (t
, address
, length
);
1896 target_flash_done (void)
1898 struct target_ops
*t
;
1900 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1901 if (t
->to_flash_done
!= NULL
)
1904 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1905 t
->to_flash_done (t
);
/* "show trust-readonly-sections" command callback.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1923 static enum target_xfer_status
1924 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1925 const char *annex
, gdb_byte
*readbuf
,
1926 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1927 ULONGEST
*xfered_len
)
1929 if (object
== TARGET_OBJECT_MEMORY
1930 && ops
->deprecated_xfer_memory
!= NULL
)
1931 /* If available, fall back to the target's
1932 "deprecated_xfer_memory" method. */
1937 if (writebuf
!= NULL
)
1939 void *buffer
= xmalloc (len
);
1940 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1942 memcpy (buffer
, writebuf
, len
);
1943 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1944 1/*write*/, NULL
, ops
);
1945 do_cleanups (cleanup
);
1947 if (readbuf
!= NULL
)
1948 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1949 0/*read*/, NULL
, ops
);
1952 *xfered_len
= (ULONGEST
) xfered
;
1953 return TARGET_XFER_E_IO
;
1955 else if (xfered
== 0 && errno
== 0)
1956 /* "deprecated_xfer_memory" uses 0, cross checked against
1957 ERRNO as one indication of an error. */
1958 return TARGET_XFER_EOF
;
1960 return TARGET_XFER_E_IO
;
1964 gdb_assert (ops
->beneath
!= NULL
);
1965 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1966 readbuf
, writebuf
, offset
, len
,
1971 /* Target vector read/write partial wrapper functions. */
1973 static enum target_xfer_status
1974 target_read_partial (struct target_ops
*ops
,
1975 enum target_object object
,
1976 const char *annex
, gdb_byte
*buf
,
1977 ULONGEST offset
, ULONGEST len
,
1978 ULONGEST
*xfered_len
)
1980 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1984 static enum target_xfer_status
1985 target_write_partial (struct target_ops
*ops
,
1986 enum target_object object
,
1987 const char *annex
, const gdb_byte
*buf
,
1988 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1990 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1994 /* Wrappers to perform the full transfer. */
1996 /* For docs on target_read see target.h. */
1999 target_read (struct target_ops
*ops
,
2000 enum target_object object
,
2001 const char *annex
, gdb_byte
*buf
,
2002 ULONGEST offset
, LONGEST len
)
2006 while (xfered
< len
)
2008 ULONGEST xfered_len
;
2009 enum target_xfer_status status
;
2011 status
= target_read_partial (ops
, object
, annex
,
2012 (gdb_byte
*) buf
+ xfered
,
2013 offset
+ xfered
, len
- xfered
,
2016 /* Call an observer, notifying them of the xfer progress? */
2017 if (status
== TARGET_XFER_EOF
)
2019 else if (status
== TARGET_XFER_OK
)
2021 xfered
+= xfered_len
;
2031 /* Assuming that the entire [begin, end) range of memory cannot be
2032 read, try to read whatever subrange is possible to read.
2034 The function returns, in RESULT, either zero or one memory block.
2035 If there's a readable subrange at the beginning, it is completely
2036 read and returned. Any further readable subrange will not be read.
2037 Otherwise, if there's a readable subrange at the end, it will be
2038 completely read and returned. Any readable subranges before it
2039 (obviously, not starting at the beginning), will be ignored. In
2040 other cases -- either no readable subrange, or readable subrange(s)
2041 that is neither at the beginning, or end, nothing is returned.
2043 The purpose of this function is to handle a read across a boundary
2044 of accessible memory in a case when memory map is not available.
2045 The above restrictions are fine for this case, but will give
2046 incorrect results if the memory is 'patchy'. However, supporting
2047 'patchy' memory would require trying to read every single byte,
2048 and it seems unacceptable solution. Explicit memory map is
2049 recommended for this case -- and target_read_memory_robust will
2050 take care of reading multiple ranges then. */
2053 read_whatever_is_readable (struct target_ops
*ops
,
2054 ULONGEST begin
, ULONGEST end
,
2055 VEC(memory_read_result_s
) **result
)
2057 gdb_byte
*buf
= xmalloc (end
- begin
);
2058 ULONGEST current_begin
= begin
;
2059 ULONGEST current_end
= end
;
2061 memory_read_result_s r
;
2062 ULONGEST xfered_len
;
2064 /* If we previously failed to read 1 byte, nothing can be done here. */
2065 if (end
- begin
<= 1)
2071 /* Check that either first or the last byte is readable, and give up
2072 if not. This heuristic is meant to permit reading accessible memory
2073 at the boundary of accessible region. */
2074 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2075 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2080 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2081 buf
+ (end
-begin
) - 1, end
- 1, 1,
2082 &xfered_len
) == TARGET_XFER_OK
)
2093 /* Loop invariant is that the [current_begin, current_end) was previously
2094 found to be not readable as a whole.
2096 Note loop condition -- if the range has 1 byte, we can't divide the range
2097 so there's no point trying further. */
2098 while (current_end
- current_begin
> 1)
2100 ULONGEST first_half_begin
, first_half_end
;
2101 ULONGEST second_half_begin
, second_half_end
;
2103 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2107 first_half_begin
= current_begin
;
2108 first_half_end
= middle
;
2109 second_half_begin
= middle
;
2110 second_half_end
= current_end
;
2114 first_half_begin
= middle
;
2115 first_half_end
= current_end
;
2116 second_half_begin
= current_begin
;
2117 second_half_end
= middle
;
2120 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2121 buf
+ (first_half_begin
- begin
),
2123 first_half_end
- first_half_begin
);
2125 if (xfer
== first_half_end
- first_half_begin
)
2127 /* This half reads up fine. So, the error must be in the
2129 current_begin
= second_half_begin
;
2130 current_end
= second_half_end
;
2134 /* This half is not readable. Because we've tried one byte, we
2135 know some part of this half if actually redable. Go to the next
2136 iteration to divide again and try to read.
2138 We don't handle the other half, because this function only tries
2139 to read a single readable subrange. */
2140 current_begin
= first_half_begin
;
2141 current_end
= first_half_end
;
2147 /* The [begin, current_begin) range has been read. */
2149 r
.end
= current_begin
;
2154 /* The [current_end, end) range has been read. */
2155 LONGEST rlen
= end
- current_end
;
2157 r
.data
= xmalloc (rlen
);
2158 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2159 r
.begin
= current_end
;
2163 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2167 free_memory_read_result_vector (void *x
)
2169 VEC(memory_read_result_s
) *v
= x
;
2170 memory_read_result_s
*current
;
2173 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2175 xfree (current
->data
);
2177 VEC_free (memory_read_result_s
, v
);
2180 VEC(memory_read_result_s
) *
2181 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2183 VEC(memory_read_result_s
) *result
= 0;
2186 while (xfered
< len
)
2188 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2191 /* If there is no explicit region, a fake one should be created. */
2192 gdb_assert (region
);
2194 if (region
->hi
== 0)
2195 rlen
= len
- xfered
;
2197 rlen
= region
->hi
- offset
;
2199 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2201 /* Cannot read this region. Note that we can end up here only
2202 if the region is explicitly marked inaccessible, or
2203 'inaccessible-by-default' is in effect. */
2208 LONGEST to_read
= min (len
- xfered
, rlen
);
2209 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2211 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2212 (gdb_byte
*) buffer
,
2213 offset
+ xfered
, to_read
);
2214 /* Call an observer, notifying them of the xfer progress? */
2217 /* Got an error reading full chunk. See if maybe we can read
2220 read_whatever_is_readable (ops
, offset
+ xfered
,
2221 offset
+ xfered
+ to_read
, &result
);
2226 struct memory_read_result r
;
2228 r
.begin
= offset
+ xfered
;
2229 r
.end
= r
.begin
+ xfer
;
2230 VEC_safe_push (memory_read_result_s
, result
, &r
);
2240 /* An alternative to target_write with progress callbacks. */
2243 target_write_with_progress (struct target_ops
*ops
,
2244 enum target_object object
,
2245 const char *annex
, const gdb_byte
*buf
,
2246 ULONGEST offset
, LONGEST len
,
2247 void (*progress
) (ULONGEST
, void *), void *baton
)
2251 /* Give the progress callback a chance to set up. */
2253 (*progress
) (0, baton
);
2255 while (xfered
< len
)
2257 ULONGEST xfered_len
;
2258 enum target_xfer_status status
;
2260 status
= target_write_partial (ops
, object
, annex
,
2261 (gdb_byte
*) buf
+ xfered
,
2262 offset
+ xfered
, len
- xfered
,
2265 if (status
== TARGET_XFER_EOF
)
2267 if (TARGET_XFER_STATUS_ERROR_P (status
))
2270 gdb_assert (status
== TARGET_XFER_OK
);
2272 (*progress
) (xfered_len
, baton
);
2274 xfered
+= xfered_len
;
2280 /* For docs on target_write see target.h. */
2283 target_write (struct target_ops
*ops
,
2284 enum target_object object
,
2285 const char *annex
, const gdb_byte
*buf
,
2286 ULONGEST offset
, LONGEST len
)
2288 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2292 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2293 the size of the transferred data. PADDING additional bytes are
2294 available in *BUF_P. This is a helper function for
2295 target_read_alloc; see the declaration of that function for more
2299 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2300 const char *annex
, gdb_byte
**buf_p
, int padding
)
2302 size_t buf_alloc
, buf_pos
;
2305 /* This function does not have a length parameter; it reads the
2306 entire OBJECT). Also, it doesn't support objects fetched partly
2307 from one target and partly from another (in a different stratum,
2308 e.g. a core file and an executable). Both reasons make it
2309 unsuitable for reading memory. */
2310 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2312 /* Start by reading up to 4K at a time. The target will throttle
2313 this number down if necessary. */
2315 buf
= xmalloc (buf_alloc
);
2319 ULONGEST xfered_len
;
2320 enum target_xfer_status status
;
2322 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2323 buf_pos
, buf_alloc
- buf_pos
- padding
,
2326 if (status
== TARGET_XFER_EOF
)
2328 /* Read all there was. */
2335 else if (status
!= TARGET_XFER_OK
)
2337 /* An error occurred. */
2339 return TARGET_XFER_E_IO
;
2342 buf_pos
+= xfered_len
;
2344 /* If the buffer is filling up, expand it. */
2345 if (buf_alloc
< buf_pos
* 2)
2348 buf
= xrealloc (buf
, buf_alloc
);
2355 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2356 the size of the transferred data. See the declaration in "target.h"
2357 function for more information about the return value. */
2360 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2361 const char *annex
, gdb_byte
**buf_p
)
2363 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2366 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2367 returned as a string, allocated using xmalloc. If an error occurs
2368 or the transfer is unsupported, NULL is returned. Empty objects
2369 are returned as allocated but empty strings. A warning is issued
2370 if the result contains any embedded NUL bytes. */
2373 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2378 LONGEST i
, transferred
;
2380 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2381 bufstr
= (char *) buffer
;
2383 if (transferred
< 0)
2386 if (transferred
== 0)
2387 return xstrdup ("");
2389 bufstr
[transferred
] = 0;
2391 /* Check for embedded NUL bytes; but allow trailing NULs. */
2392 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2395 warning (_("target object %d, annex %s, "
2396 "contained unexpected null characters"),
2397 (int) object
, annex
? annex
: "(none)");
2404 /* Memory transfer methods. */
2407 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2410 /* This method is used to read from an alternate, non-current
2411 target. This read must bypass the overlay support (as symbols
2412 don't match this target), and GDB's internal cache (wrong cache
2413 for this target). */
2414 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2416 memory_error (TARGET_XFER_E_IO
, addr
);
2420 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2421 int len
, enum bfd_endian byte_order
)
2423 gdb_byte buf
[sizeof (ULONGEST
)];
2425 gdb_assert (len
<= sizeof (buf
));
2426 get_target_memory (ops
, addr
, buf
, len
);
2427 return extract_unsigned_integer (buf
, len
, byte_order
);
2433 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2434 struct bp_target_info
*bp_tgt
)
2436 if (!may_insert_breakpoints
)
2438 warning (_("May not insert breakpoints"));
2442 return current_target
.to_insert_breakpoint (¤t_target
,
2449 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2450 struct bp_target_info
*bp_tgt
)
2452 /* This is kind of a weird case to handle, but the permission might
2453 have been changed after breakpoints were inserted - in which case
2454 we should just take the user literally and assume that any
2455 breakpoints should be left in place. */
2456 if (!may_insert_breakpoints
)
2458 warning (_("May not remove breakpoints"));
2462 return current_target
.to_remove_breakpoint (¤t_target
,
2467 target_info (char *args
, int from_tty
)
2469 struct target_ops
*t
;
2470 int has_all_mem
= 0;
2472 if (symfile_objfile
!= NULL
)
2473 printf_unfiltered (_("Symbols from \"%s\".\n"),
2474 objfile_name (symfile_objfile
));
2476 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2478 if (!(*t
->to_has_memory
) (t
))
2481 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2484 printf_unfiltered (_("\tWhile running this, "
2485 "GDB does not access memory from...\n"));
2486 printf_unfiltered ("%s:\n", t
->to_longname
);
2487 (t
->to_files_info
) (t
);
2488 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2492 /* This function is called before any new inferior is created, e.g.
2493 by running a program, attaching, or connecting to a target.
2494 It cleans up any state from previous invocations which might
2495 change between runs. This is a subset of what target_preopen
2496 resets (things which might change between targets). */
2499 target_pre_inferior (int from_tty
)
2501 /* Clear out solib state. Otherwise the solib state of the previous
2502 inferior might have survived and is entirely wrong for the new
2503 target. This has been observed on GNU/Linux using glibc 2.3. How
2515 Cannot access memory at address 0xdeadbeef
2518 /* In some OSs, the shared library list is the same/global/shared
2519 across inferiors. If code is shared between processes, so are
2520 memory regions and features. */
2521 if (!gdbarch_has_global_solist (target_gdbarch ()))
2523 no_shared_libraries (NULL
, from_tty
);
2525 invalidate_target_mem_regions ();
2527 target_clear_description ();
2530 agent_capability_invalidate ();
2533 /* Callback for iterate_over_inferiors. Gets rid of the given
2537 dispose_inferior (struct inferior
*inf
, void *args
)
2539 struct thread_info
*thread
;
2541 thread
= any_thread_of_process (inf
->pid
);
2544 switch_to_thread (thread
->ptid
);
2546 /* Core inferiors actually should be detached, not killed. */
2547 if (target_has_execution
)
2550 target_detach (NULL
, 0);
2556 /* This is to be called by the open routine before it does
2560 target_preopen (int from_tty
)
2564 if (have_inferiors ())
2567 || !have_live_inferiors ()
2568 || query (_("A program is being debugged already. Kill it? ")))
2569 iterate_over_inferiors (dispose_inferior
, NULL
);
2571 error (_("Program not killed."));
2574 /* Calling target_kill may remove the target from the stack. But if
2575 it doesn't (which seems like a win for UDI), remove it now. */
2576 /* Leave the exec target, though. The user may be switching from a
2577 live process to a core of the same program. */
2578 pop_all_targets_above (file_stratum
);
2580 target_pre_inferior (from_tty
);
2583 /* Detach a target after doing deferred register stores. */
2586 target_detach (const char *args
, int from_tty
)
2588 struct target_ops
* t
;
2590 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2591 /* Don't remove global breakpoints here. They're removed on
2592 disconnection from the target. */
2595 /* If we're in breakpoints-always-inserted mode, have to remove
2596 them before detaching. */
2597 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2599 prepare_for_detach ();
2601 current_target
.to_detach (¤t_target
, args
, from_tty
);
2603 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2608 target_disconnect (char *args
, int from_tty
)
2610 struct target_ops
*t
;
2612 /* If we're in breakpoints-always-inserted mode or if breakpoints
2613 are global across processes, we have to remove them before
2615 remove_breakpoints ();
2617 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2618 if (t
->to_disconnect
!= NULL
)
2621 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2623 t
->to_disconnect (t
, args
, from_tty
);
2631 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2633 struct target_ops
*t
;
2634 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2639 char *status_string
;
2640 char *options_string
;
2642 status_string
= target_waitstatus_to_string (status
);
2643 options_string
= target_options_to_string (options
);
2644 fprintf_unfiltered (gdb_stdlog
,
2645 "target_wait (%d, status, options={%s})"
2647 ptid_get_pid (ptid
), options_string
,
2648 ptid_get_pid (retval
), status_string
);
2649 xfree (status_string
);
2650 xfree (options_string
);
2657 target_pid_to_str (ptid_t ptid
)
2659 struct target_ops
*t
;
2661 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2663 if (t
->to_pid_to_str
!= NULL
)
2664 return (*t
->to_pid_to_str
) (t
, ptid
);
2667 return normal_pid_to_str (ptid
);
2671 target_thread_name (struct thread_info
*info
)
2673 return current_target
.to_thread_name (¤t_target
, info
);
2677 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2679 struct target_ops
*t
;
2681 target_dcache_invalidate ();
2683 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2685 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2686 ptid_get_pid (ptid
),
2687 step
? "step" : "continue",
2688 gdb_signal_to_name (signal
));
2690 registers_changed_ptid (ptid
);
2691 set_executing (ptid
, 1);
2692 set_running (ptid
, 1);
2693 clear_inline_frame_state (ptid
);
2697 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2699 struct target_ops
*t
;
2701 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2703 if (t
->to_pass_signals
!= NULL
)
2709 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2712 for (i
= 0; i
< numsigs
; i
++)
2713 if (pass_signals
[i
])
2714 fprintf_unfiltered (gdb_stdlog
, " %s",
2715 gdb_signal_to_name (i
));
2717 fprintf_unfiltered (gdb_stdlog
, " })\n");
2720 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2727 target_program_signals (int numsigs
, unsigned char *program_signals
)
2729 struct target_ops
*t
;
2731 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2733 if (t
->to_program_signals
!= NULL
)
2739 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2742 for (i
= 0; i
< numsigs
; i
++)
2743 if (program_signals
[i
])
2744 fprintf_unfiltered (gdb_stdlog
, " %s",
2745 gdb_signal_to_name (i
));
2747 fprintf_unfiltered (gdb_stdlog
, " })\n");
2750 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2756 /* Look through the list of possible targets for a target that can
2760 target_follow_fork (int follow_child
, int detach_fork
)
2762 struct target_ops
*t
;
2764 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2766 if (t
->to_follow_fork
!= NULL
)
2768 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2771 fprintf_unfiltered (gdb_stdlog
,
2772 "target_follow_fork (%d, %d) = %d\n",
2773 follow_child
, detach_fork
, retval
);
2778 /* Some target returned a fork event, but did not know how to follow it. */
2779 internal_error (__FILE__
, __LINE__
,
2780 _("could not find a target to follow fork"));
2784 target_mourn_inferior (void)
2786 struct target_ops
*t
;
2788 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2790 if (t
->to_mourn_inferior
!= NULL
)
2792 t
->to_mourn_inferior (t
);
2794 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2796 /* We no longer need to keep handles on any of the object files.
2797 Make sure to release them to avoid unnecessarily locking any
2798 of them while we're not actually debugging. */
2799 bfd_cache_close_all ();
2805 internal_error (__FILE__
, __LINE__
,
2806 _("could not find a target to follow mourn inferior"));
2809 /* Look for a target which can describe architectural features, starting
2810 from TARGET. If we find one, return its description. */
2812 const struct target_desc
*
2813 target_read_description (struct target_ops
*target
)
2815 struct target_ops
*t
;
2817 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2818 if (t
->to_read_description
!= NULL
)
2820 const struct target_desc
*tdesc
;
2822 tdesc
= t
->to_read_description (t
);
2830 /* The default implementation of to_search_memory.
2831 This implements a basic search of memory, reading target memory and
2832 performing the search here (as opposed to performing the search in on the
2833 target side with, for example, gdbserver). */
2836 simple_search_memory (struct target_ops
*ops
,
2837 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2838 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2839 CORE_ADDR
*found_addrp
)
2841 /* NOTE: also defined in find.c testcase. */
2842 #define SEARCH_CHUNK_SIZE 16000
2843 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2844 /* Buffer to hold memory contents for searching. */
2845 gdb_byte
*search_buf
;
2846 unsigned search_buf_size
;
2847 struct cleanup
*old_cleanups
;
2849 search_buf_size
= chunk_size
+ pattern_len
- 1;
2851 /* No point in trying to allocate a buffer larger than the search space. */
2852 if (search_space_len
< search_buf_size
)
2853 search_buf_size
= search_space_len
;
2855 search_buf
= malloc (search_buf_size
);
2856 if (search_buf
== NULL
)
2857 error (_("Unable to allocate memory to perform the search."));
2858 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2860 /* Prime the search buffer. */
2862 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2863 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2865 warning (_("Unable to access %s bytes of target "
2866 "memory at %s, halting search."),
2867 pulongest (search_buf_size
), hex_string (start_addr
));
2868 do_cleanups (old_cleanups
);
2872 /* Perform the search.
2874 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2875 When we've scanned N bytes we copy the trailing bytes to the start and
2876 read in another N bytes. */
2878 while (search_space_len
>= pattern_len
)
2880 gdb_byte
*found_ptr
;
2881 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2883 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2884 pattern
, pattern_len
);
2886 if (found_ptr
!= NULL
)
2888 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2890 *found_addrp
= found_addr
;
2891 do_cleanups (old_cleanups
);
2895 /* Not found in this chunk, skip to next chunk. */
2897 /* Don't let search_space_len wrap here, it's unsigned. */
2898 if (search_space_len
>= chunk_size
)
2899 search_space_len
-= chunk_size
;
2901 search_space_len
= 0;
2903 if (search_space_len
>= pattern_len
)
2905 unsigned keep_len
= search_buf_size
- chunk_size
;
2906 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2909 /* Copy the trailing part of the previous iteration to the front
2910 of the buffer for the next iteration. */
2911 gdb_assert (keep_len
== pattern_len
- 1);
2912 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2914 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2916 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2917 search_buf
+ keep_len
, read_addr
,
2918 nr_to_read
) != nr_to_read
)
2920 warning (_("Unable to access %s bytes of target "
2921 "memory at %s, halting search."),
2922 plongest (nr_to_read
),
2923 hex_string (read_addr
));
2924 do_cleanups (old_cleanups
);
2928 start_addr
+= chunk_size
;
2934 do_cleanups (old_cleanups
);
2938 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2939 sequence of bytes in PATTERN with length PATTERN_LEN.
2941 The result is 1 if found, 0 if not found, and -1 if there was an error
2942 requiring halting of the search (e.g. memory read error).
2943 If the pattern is found the address is recorded in FOUND_ADDRP. */
2946 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2947 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2948 CORE_ADDR
*found_addrp
)
2950 struct target_ops
*t
;
2953 /* We don't use INHERIT to set current_target.to_search_memory,
2954 so we have to scan the target stack and handle targetdebug
2958 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2959 hex_string (start_addr
));
2961 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2962 if (t
->to_search_memory
!= NULL
)
2967 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2968 pattern
, pattern_len
, found_addrp
);
2972 /* If a special version of to_search_memory isn't available, use the
2974 found
= simple_search_memory (current_target
.beneath
,
2975 start_addr
, search_space_len
,
2976 pattern
, pattern_len
, found_addrp
);
2980 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2985 /* Look through the currently pushed targets. If none of them will
2986 be able to restart the currently running process, issue an error
2990 target_require_runnable (void)
2992 struct target_ops
*t
;
2994 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2996 /* If this target knows how to create a new program, then
2997 assume we will still be able to after killing the current
2998 one. Either killing and mourning will not pop T, or else
2999 find_default_run_target will find it again. */
3000 if (t
->to_create_inferior
!= NULL
)
3003 /* Do not worry about thread_stratum targets that can not
3004 create inferiors. Assume they will be pushed again if
3005 necessary, and continue to the process_stratum. */
3006 if (t
->to_stratum
== thread_stratum
3007 || t
->to_stratum
== arch_stratum
)
3010 error (_("The \"%s\" target does not support \"run\". "
3011 "Try \"help target\" or \"continue\"."),
3015 /* This function is only called if the target is running. In that
3016 case there should have been a process_stratum target and it
3017 should either know how to create inferiors, or not... */
3018 internal_error (__FILE__
, __LINE__
, _("No targets found"));
3021 /* Look through the list of possible targets for a target that can
3022 execute a run or attach command without any other data. This is
3023 used to locate the default process stratum.
3025 If DO_MESG is not NULL, the result is always valid (error() is
3026 called for errors); else, return NULL on error. */
3028 static struct target_ops
*
3029 find_default_run_target (char *do_mesg
)
3031 struct target_ops
**t
;
3032 struct target_ops
*runable
= NULL
;
3037 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
3040 if ((*t
)->to_can_run
&& target_can_run (*t
))
3050 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3059 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3061 struct target_ops
*t
;
3063 t
= find_default_run_target ("attach");
3064 (t
->to_attach
) (t
, args
, from_tty
);
3069 find_default_create_inferior (struct target_ops
*ops
,
3070 char *exec_file
, char *allargs
, char **env
,
3073 struct target_ops
*t
;
3075 t
= find_default_run_target ("run");
3076 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3081 find_default_can_async_p (struct target_ops
*ignore
)
3083 struct target_ops
*t
;
3085 /* This may be called before the target is pushed on the stack;
3086 look for the default process stratum. If there's none, gdb isn't
3087 configured with a native debugger, and target remote isn't
3089 t
= find_default_run_target (NULL
);
3090 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3091 return (t
->to_can_async_p
) (t
);
3096 find_default_is_async_p (struct target_ops
*ignore
)
3098 struct target_ops
*t
;
3100 /* This may be called before the target is pushed on the stack;
3101 look for the default process stratum. If there's none, gdb isn't
3102 configured with a native debugger, and target remote isn't
3104 t
= find_default_run_target (NULL
);
3105 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3106 return (t
->to_is_async_p
) (t
);
3111 find_default_supports_non_stop (struct target_ops
*self
)
3113 struct target_ops
*t
;
3115 t
= find_default_run_target (NULL
);
3116 if (t
&& t
->to_supports_non_stop
)
3117 return (t
->to_supports_non_stop
) (t
);
3122 target_supports_non_stop (void)
3124 struct target_ops
*t
;
3126 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3127 if (t
->to_supports_non_stop
)
3128 return t
->to_supports_non_stop (t
);
3133 /* Implement the "info proc" command. */
3136 target_info_proc (char *args
, enum info_proc_what what
)
3138 struct target_ops
*t
;
3140 /* If we're already connected to something that can get us OS
3141 related data, use it. Otherwise, try using the native
3143 if (current_target
.to_stratum
>= process_stratum
)
3144 t
= current_target
.beneath
;
3146 t
= find_default_run_target (NULL
);
3148 for (; t
!= NULL
; t
= t
->beneath
)
3150 if (t
->to_info_proc
!= NULL
)
3152 t
->to_info_proc (t
, args
, what
);
3155 fprintf_unfiltered (gdb_stdlog
,
3156 "target_info_proc (\"%s\", %d)\n", args
, what
);
3166 find_default_supports_disable_randomization (struct target_ops
*self
)
3168 struct target_ops
*t
;
3170 t
= find_default_run_target (NULL
);
3171 if (t
&& t
->to_supports_disable_randomization
)
3172 return (t
->to_supports_disable_randomization
) (t
);
3177 target_supports_disable_randomization (void)
3179 struct target_ops
*t
;
3181 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3182 if (t
->to_supports_disable_randomization
)
3183 return t
->to_supports_disable_randomization (t
);
3189 target_get_osdata (const char *type
)
3191 struct target_ops
*t
;
3193 /* If we're already connected to something that can get us OS
3194 related data, use it. Otherwise, try using the native
3196 if (current_target
.to_stratum
>= process_stratum
)
3197 t
= current_target
.beneath
;
3199 t
= find_default_run_target ("get OS data");
3204 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3207 /* Determine the current address space of thread PTID. */
3209 struct address_space
*
3210 target_thread_address_space (ptid_t ptid
)
3212 struct address_space
*aspace
;
3213 struct inferior
*inf
;
3214 struct target_ops
*t
;
3216 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3218 if (t
->to_thread_address_space
!= NULL
)
3220 aspace
= t
->to_thread_address_space (t
, ptid
);
3221 gdb_assert (aspace
);
3224 fprintf_unfiltered (gdb_stdlog
,
3225 "target_thread_address_space (%s) = %d\n",
3226 target_pid_to_str (ptid
),
3227 address_space_num (aspace
));
3232 /* Fall-back to the "main" address space of the inferior. */
3233 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3235 if (inf
== NULL
|| inf
->aspace
== NULL
)
3236 internal_error (__FILE__
, __LINE__
,
3237 _("Can't determine the current "
3238 "address space of thread %s\n"),
3239 target_pid_to_str (ptid
));
3245 /* Target file operations. */
3247 static struct target_ops
*
3248 default_fileio_target (void)
3250 /* If we're already connected to something that can perform
3251 file I/O, use it. Otherwise, try using the native target. */
3252 if (current_target
.to_stratum
>= process_stratum
)
3253 return current_target
.beneath
;
3255 return find_default_run_target ("file I/O");
3258 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3259 target file descriptor, or -1 if an error occurs (and set
3262 target_fileio_open (const char *filename
, int flags
, int mode
,
3265 struct target_ops
*t
;
3267 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3269 if (t
->to_fileio_open
!= NULL
)
3271 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3274 fprintf_unfiltered (gdb_stdlog
,
3275 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3276 filename
, flags
, mode
,
3277 fd
, fd
!= -1 ? 0 : *target_errno
);
3282 *target_errno
= FILEIO_ENOSYS
;
3286 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3287 Return the number of bytes written, or -1 if an error occurs
3288 (and set *TARGET_ERRNO). */
3290 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3291 ULONGEST offset
, int *target_errno
)
3293 struct target_ops
*t
;
3295 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3297 if (t
->to_fileio_pwrite
!= NULL
)
3299 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3303 fprintf_unfiltered (gdb_stdlog
,
3304 "target_fileio_pwrite (%d,...,%d,%s) "
3306 fd
, len
, pulongest (offset
),
3307 ret
, ret
!= -1 ? 0 : *target_errno
);
3312 *target_errno
= FILEIO_ENOSYS
;
3316 /* Read up to LEN bytes FD on the target into READ_BUF.
3317 Return the number of bytes read, or -1 if an error occurs
3318 (and set *TARGET_ERRNO). */
3320 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3321 ULONGEST offset
, int *target_errno
)
3323 struct target_ops
*t
;
3325 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3327 if (t
->to_fileio_pread
!= NULL
)
3329 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3333 fprintf_unfiltered (gdb_stdlog
,
3334 "target_fileio_pread (%d,...,%d,%s) "
3336 fd
, len
, pulongest (offset
),
3337 ret
, ret
!= -1 ? 0 : *target_errno
);
3342 *target_errno
= FILEIO_ENOSYS
;
3346 /* Close FD on the target. Return 0, or -1 if an error occurs
3347 (and set *TARGET_ERRNO). */
3349 target_fileio_close (int fd
, int *target_errno
)
3351 struct target_ops
*t
;
3353 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3355 if (t
->to_fileio_close
!= NULL
)
3357 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3360 fprintf_unfiltered (gdb_stdlog
,
3361 "target_fileio_close (%d) = %d (%d)\n",
3362 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3367 *target_errno
= FILEIO_ENOSYS
;
3371 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3372 occurs (and set *TARGET_ERRNO). */
3374 target_fileio_unlink (const char *filename
, int *target_errno
)
3376 struct target_ops
*t
;
3378 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3380 if (t
->to_fileio_unlink
!= NULL
)
3382 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3385 fprintf_unfiltered (gdb_stdlog
,
3386 "target_fileio_unlink (%s) = %d (%d)\n",
3387 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3392 *target_errno
= FILEIO_ENOSYS
;
3396 /* Read value of symbolic link FILENAME on the target. Return a
3397 null-terminated string allocated via xmalloc, or NULL if an error
3398 occurs (and set *TARGET_ERRNO). */
3400 target_fileio_readlink (const char *filename
, int *target_errno
)
3402 struct target_ops
*t
;
3404 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3406 if (t
->to_fileio_readlink
!= NULL
)
3408 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3411 fprintf_unfiltered (gdb_stdlog
,
3412 "target_fileio_readlink (%s) = %s (%d)\n",
3413 filename
, ret
? ret
: "(nil)",
3414 ret
? 0 : *target_errno
);
3419 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3432 /* Read target file FILENAME. Store the result in *BUF_P and
3433 return the size of the transferred data. PADDING additional bytes are
3434 available in *BUF_P. This is a helper function for
3435 target_fileio_read_alloc; see the declaration of that function for more
3439 target_fileio_read_alloc_1 (const char *filename
,
3440 gdb_byte
**buf_p
, int padding
)
3442 struct cleanup
*close_cleanup
;
3443 size_t buf_alloc
, buf_pos
;
3449 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3453 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3455 /* Start by reading up to 4K at a time. The target will throttle
3456 this number down if necessary. */
3458 buf
= xmalloc (buf_alloc
);
3462 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3463 buf_alloc
- buf_pos
- padding
, buf_pos
,
3467 /* An error occurred. */
3468 do_cleanups (close_cleanup
);
3474 /* Read all there was. */
3475 do_cleanups (close_cleanup
);
3485 /* If the buffer is filling up, expand it. */
3486 if (buf_alloc
< buf_pos
* 2)
3489 buf
= xrealloc (buf
, buf_alloc
);
3496 /* Read target file FILENAME. Store the result in *BUF_P and return
3497 the size of the transferred data. See the declaration in "target.h"
3498 function for more information about the return value. */
3501 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3503 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3506 /* Read target file FILENAME. The result is NUL-terminated and
3507 returned as a string, allocated using xmalloc. If an error occurs
3508 or the transfer is unsupported, NULL is returned. Empty objects
3509 are returned as allocated but empty strings. A warning is issued
3510 if the result contains any embedded NUL bytes. */
3513 target_fileio_read_stralloc (const char *filename
)
3517 LONGEST i
, transferred
;
3519 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3520 bufstr
= (char *) buffer
;
3522 if (transferred
< 0)
3525 if (transferred
== 0)
3526 return xstrdup ("");
3528 bufstr
[transferred
] = 0;
3530 /* Check for embedded NUL bytes; but allow trailing NULs. */
3531 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3534 warning (_("target file %s "
3535 "contained unexpected null characters"),
3545 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3546 CORE_ADDR addr
, int len
)
3548 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3552 default_watchpoint_addr_within_range (struct target_ops
*target
,
3554 CORE_ADDR start
, int length
)
3556 return addr
>= start
&& addr
< start
+ length
;
3559 static struct gdbarch
*
3560 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3562 return target_gdbarch ();
/* Callback used to fill target-vector slots whose default result is
   -1.  */

static int
return_minus_one (void)
{
  return -1;
}
3584 * Find the next target down the stack from the specified target.
3588 find_target_beneath (struct target_ops
*t
)
3596 find_target_at (enum strata stratum
)
3598 struct target_ops
*t
;
3600 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3601 if (t
->to_stratum
== stratum
)
3608 /* The inferior process has died. Long live the inferior! */
3611 generic_mourn_inferior (void)
3615 ptid
= inferior_ptid
;
3616 inferior_ptid
= null_ptid
;
3618 /* Mark breakpoints uninserted in case something tries to delete a
3619 breakpoint while we delete the inferior's threads (which would
3620 fail, since the inferior is long gone). */
3621 mark_breakpoints_out ();
3623 if (!ptid_equal (ptid
, null_ptid
))
3625 int pid
= ptid_get_pid (ptid
);
3626 exit_inferior (pid
);
3629 /* Note this wipes step-resume breakpoints, so needs to be done
3630 after exit_inferior, which ends up referencing the step-resume
3631 breakpoints through clear_thread_inferior_resources. */
3632 breakpoint_init_inferior (inf_exited
);
3634 registers_changed ();
3636 reopen_exec_file ();
3637 reinit_frame_cache ();
3639 if (deprecated_detach_hook
)
3640 deprecated_detach_hook ();
3643 /* Convert a normal process ID to a string. Returns the string in a
3647 normal_pid_to_str (ptid_t ptid
)
3649 static char buf
[32];
3651 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3656 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3658 return normal_pid_to_str (ptid
);
3661 /* Error-catcher for target_find_memory_regions. */
3663 dummy_find_memory_regions (struct target_ops
*self
,
3664 find_memory_region_ftype ignore1
, void *ignore2
)
3666 error (_("Command not implemented for this target."));
3670 /* Error-catcher for target_make_corefile_notes. */
3672 dummy_make_corefile_notes (struct target_ops
*self
,
3673 bfd
*ignore1
, int *ignore2
)
3675 error (_("Command not implemented for this target."));
3679 /* Error-catcher for target_get_bookmark. */
3681 dummy_get_bookmark (struct target_ops
*self
, char *ignore1
, int ignore2
)
3687 /* Error-catcher for target_goto_bookmark. */
3689 dummy_goto_bookmark (struct target_ops
*self
, gdb_byte
*ignore
, int from_tty
)
3694 /* Set up the handful of non-empty slots needed by the dummy target
3698 init_dummy_target (void)
3700 dummy_target
.to_shortname
= "None";
3701 dummy_target
.to_longname
= "None";
3702 dummy_target
.to_doc
= "";
3703 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3704 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3705 dummy_target
.to_supports_disable_randomization
3706 = find_default_supports_disable_randomization
;
3707 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3708 dummy_target
.to_stratum
= dummy_stratum
;
3709 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3710 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3711 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3712 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3713 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3714 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3715 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3716 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3717 dummy_target
.to_has_execution
3718 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3719 dummy_target
.to_magic
= OPS_MAGIC
;
3721 install_dummy_methods (&dummy_target
);
3725 debug_to_open (char *args
, int from_tty
)
3727 debug_target
.to_open (args
, from_tty
);
3729 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3733 target_close (struct target_ops
*targ
)
3735 gdb_assert (!target_is_pushed (targ
));
3737 if (targ
->to_xclose
!= NULL
)
3738 targ
->to_xclose (targ
);
3739 else if (targ
->to_close
!= NULL
)
3740 targ
->to_close (targ
);
3743 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3747 target_attach (char *args
, int from_tty
)
3749 current_target
.to_attach (¤t_target
, args
, from_tty
);
3751 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3756 target_thread_alive (ptid_t ptid
)
3758 struct target_ops
*t
;
3760 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3762 if (t
->to_thread_alive
!= NULL
)
3766 retval
= t
->to_thread_alive (t
, ptid
);
3768 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3769 ptid_get_pid (ptid
), retval
);
3779 target_find_new_threads (void)
3781 struct target_ops
*t
;
3783 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3785 if (t
->to_find_new_threads
!= NULL
)
3787 t
->to_find_new_threads (t
);
3789 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3797 target_stop (ptid_t ptid
)
3801 warning (_("May not interrupt or stop the target, ignoring attempt"));
3805 (*current_target
.to_stop
) (¤t_target
, ptid
);
3809 debug_to_post_attach (struct target_ops
*self
, int pid
)
3811 debug_target
.to_post_attach (&debug_target
, pid
);
3813 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3816 /* Concatenate ELEM to LIST, a comma separate list, and return the
3817 result. The LIST incoming argument is released. */
3820 str_comma_list_concat_elem (char *list
, const char *elem
)
3823 return xstrdup (elem
);
3825 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3847 target_options_to_string (int target_options
)
3851 #define DO_TARG_OPTION(OPT) \
3852 ret = do_option (&target_options, ret, OPT, #OPT)
3854 DO_TARG_OPTION (TARGET_WNOHANG
);
3856 if (target_options
!= 0)
3857 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3865 debug_print_register (const char * func
,
3866 struct regcache
*regcache
, int regno
)
3868 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3870 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3871 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3872 && gdbarch_register_name (gdbarch
, regno
) != NULL
3873 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3874 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3875 gdbarch_register_name (gdbarch
, regno
));
3877 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3878 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3880 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3881 int i
, size
= register_size (gdbarch
, regno
);
3882 gdb_byte buf
[MAX_REGISTER_SIZE
];
3884 regcache_raw_collect (regcache
, regno
, buf
);
3885 fprintf_unfiltered (gdb_stdlog
, " = ");
3886 for (i
= 0; i
< size
; i
++)
3888 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3890 if (size
<= sizeof (LONGEST
))
3892 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3894 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3895 core_addr_to_string_nz (val
), plongest (val
));
3898 fprintf_unfiltered (gdb_stdlog
, "\n");
3902 target_fetch_registers (struct regcache
*regcache
, int regno
)
3904 struct target_ops
*t
;
3906 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3908 if (t
->to_fetch_registers
!= NULL
)
3910 t
->to_fetch_registers (t
, regcache
, regno
);
3912 debug_print_register ("target_fetch_registers", regcache
, regno
);
3919 target_store_registers (struct regcache
*regcache
, int regno
)
3921 struct target_ops
*t
;
3923 if (!may_write_registers
)
3924 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3926 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3929 debug_print_register ("target_store_registers", regcache
, regno
);
3934 target_core_of_thread (ptid_t ptid
)
3936 struct target_ops
*t
;
3938 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3940 if (t
->to_core_of_thread
!= NULL
)
3942 int retval
= t
->to_core_of_thread (t
, ptid
);
3945 fprintf_unfiltered (gdb_stdlog
,
3946 "target_core_of_thread (%d) = %d\n",
3947 ptid_get_pid (ptid
), retval
);
3956 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3958 struct target_ops
*t
;
3960 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3962 if (t
->to_verify_memory
!= NULL
)
3964 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3967 fprintf_unfiltered (gdb_stdlog
,
3968 "target_verify_memory (%s, %s) = %d\n",
3969 paddress (target_gdbarch (), memaddr
),
3979 /* The documentation for this function is in its prototype declaration in
3983 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3985 struct target_ops
*t
;
3987 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3988 if (t
->to_insert_mask_watchpoint
!= NULL
)
3992 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
3995 fprintf_unfiltered (gdb_stdlog
, "\
3996 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3997 core_addr_to_string (addr
),
3998 core_addr_to_string (mask
), rw
, ret
);
4006 /* The documentation for this function is in its prototype declaration in
4010 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
4012 struct target_ops
*t
;
4014 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4015 if (t
->to_remove_mask_watchpoint
!= NULL
)
4019 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
4022 fprintf_unfiltered (gdb_stdlog
, "\
4023 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4024 core_addr_to_string (addr
),
4025 core_addr_to_string (mask
), rw
, ret
);
4033 /* The documentation for this function is in its prototype declaration
4037 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
4039 struct target_ops
*t
;
4041 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4042 if (t
->to_masked_watch_num_registers
!= NULL
)
4043 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
4048 /* The documentation for this function is in its prototype declaration
4052 target_ranged_break_num_registers (void)
4054 struct target_ops
*t
;
4056 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4057 if (t
->to_ranged_break_num_registers
!= NULL
)
4058 return t
->to_ranged_break_num_registers (t
);
4065 struct btrace_target_info
*
4066 target_enable_btrace (ptid_t ptid
)
4068 struct target_ops
*t
;
4070 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4071 if (t
->to_enable_btrace
!= NULL
)
4072 return t
->to_enable_btrace (t
, ptid
);
4081 target_disable_btrace (struct btrace_target_info
*btinfo
)
4083 struct target_ops
*t
;
4085 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4086 if (t
->to_disable_btrace
!= NULL
)
4088 t
->to_disable_btrace (t
, btinfo
);
4098 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4100 struct target_ops
*t
;
4102 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4103 if (t
->to_teardown_btrace
!= NULL
)
4105 t
->to_teardown_btrace (t
, btinfo
);
4115 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4116 struct btrace_target_info
*btinfo
,
4117 enum btrace_read_type type
)
4119 struct target_ops
*t
;
4121 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4122 if (t
->to_read_btrace
!= NULL
)
4123 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
4126 return BTRACE_ERR_NOT_SUPPORTED
;
4132 target_stop_recording (void)
4134 struct target_ops
*t
;
4136 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4137 if (t
->to_stop_recording
!= NULL
)
4139 t
->to_stop_recording (t
);
4143 /* This is optional. */
4149 target_info_record (void)
4151 struct target_ops
*t
;
4153 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4154 if (t
->to_info_record
!= NULL
)
4156 t
->to_info_record (t
);
4166 target_save_record (const char *filename
)
4168 struct target_ops
*t
;
4170 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4171 if (t
->to_save_record
!= NULL
)
4173 t
->to_save_record (t
, filename
);
4183 target_supports_delete_record (void)
4185 struct target_ops
*t
;
4187 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4188 if (t
->to_delete_record
!= NULL
)
4197 target_delete_record (void)
4199 struct target_ops
*t
;
4201 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4202 if (t
->to_delete_record
!= NULL
)
4204 t
->to_delete_record (t
);
4214 target_record_is_replaying (void)
4216 struct target_ops
*t
;
4218 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4219 if (t
->to_record_is_replaying
!= NULL
)
4220 return t
->to_record_is_replaying (t
);
4228 target_goto_record_begin (void)
4230 struct target_ops
*t
;
4232 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4233 if (t
->to_goto_record_begin
!= NULL
)
4235 t
->to_goto_record_begin (t
);
4245 target_goto_record_end (void)
4247 struct target_ops
*t
;
4249 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4250 if (t
->to_goto_record_end
!= NULL
)
4252 t
->to_goto_record_end (t
);
4262 target_goto_record (ULONGEST insn
)
4264 struct target_ops
*t
;
4266 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4267 if (t
->to_goto_record
!= NULL
)
4269 t
->to_goto_record (t
, insn
);
4279 target_insn_history (int size
, int flags
)
4281 struct target_ops
*t
;
4283 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4284 if (t
->to_insn_history
!= NULL
)
4286 t
->to_insn_history (t
, size
, flags
);
4296 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4298 struct target_ops
*t
;
4300 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4301 if (t
->to_insn_history_from
!= NULL
)
4303 t
->to_insn_history_from (t
, from
, size
, flags
);
4313 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4315 struct target_ops
*t
;
4317 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4318 if (t
->to_insn_history_range
!= NULL
)
4320 t
->to_insn_history_range (t
, begin
, end
, flags
);
4330 target_call_history (int size
, int flags
)
4332 struct target_ops
*t
;
4334 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4335 if (t
->to_call_history
!= NULL
)
4337 t
->to_call_history (t
, size
, flags
);
4347 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4349 struct target_ops
*t
;
4351 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4352 if (t
->to_call_history_from
!= NULL
)
4354 t
->to_call_history_from (t
, begin
, size
, flags
);
4364 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4366 struct target_ops
*t
;
4368 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4369 if (t
->to_call_history_range
!= NULL
)
4371 t
->to_call_history_range (t
, begin
, end
, flags
);
4379 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4381 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4383 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4388 const struct frame_unwind
*
4389 target_get_unwinder (void)
4391 struct target_ops
*t
;
4393 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4394 if (t
->to_get_unwinder
!= NULL
)
4395 return t
->to_get_unwinder
;
4402 const struct frame_unwind
*
4403 target_get_tailcall_unwinder (void)
4405 struct target_ops
*t
;
4407 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4408 if (t
->to_get_tailcall_unwinder
!= NULL
)
4409 return t
->to_get_tailcall_unwinder
;
4417 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4418 struct gdbarch
*gdbarch
)
4420 for (; ops
!= NULL
; ops
= ops
->beneath
)
4421 if (ops
->to_decr_pc_after_break
!= NULL
)
4422 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4424 return gdbarch_decr_pc_after_break (gdbarch
);
4430 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4432 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4436 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4437 int write
, struct mem_attrib
*attrib
,
4438 struct target_ops
*target
)
4442 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4445 fprintf_unfiltered (gdb_stdlog
,
4446 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4447 paddress (target_gdbarch (), memaddr
), len
,
4448 write
? "write" : "read", retval
);
4454 fputs_unfiltered (", bytes =", gdb_stdlog
);
4455 for (i
= 0; i
< retval
; i
++)
4457 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4459 if (targetdebug
< 2 && i
> 0)
4461 fprintf_unfiltered (gdb_stdlog
, " ...");
4464 fprintf_unfiltered (gdb_stdlog
, "\n");
4467 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4471 fputc_unfiltered ('\n', gdb_stdlog
);
4477 debug_to_files_info (struct target_ops
*target
)
4479 debug_target
.to_files_info (target
);
4481 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4485 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4486 struct bp_target_info
*bp_tgt
)
4490 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4492 fprintf_unfiltered (gdb_stdlog
,
4493 "target_insert_breakpoint (%s, xxx) = %ld\n",
4494 core_addr_to_string (bp_tgt
->placed_address
),
4495 (unsigned long) retval
);
4500 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4501 struct bp_target_info
*bp_tgt
)
4505 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4507 fprintf_unfiltered (gdb_stdlog
,
4508 "target_remove_breakpoint (%s, xxx) = %ld\n",
4509 core_addr_to_string (bp_tgt
->placed_address
),
4510 (unsigned long) retval
);
4515 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4516 int type
, int cnt
, int from_tty
)
4520 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4521 type
, cnt
, from_tty
);
4523 fprintf_unfiltered (gdb_stdlog
,
4524 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4525 (unsigned long) type
,
4526 (unsigned long) cnt
,
4527 (unsigned long) from_tty
,
4528 (unsigned long) retval
);
4533 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4534 CORE_ADDR addr
, int len
)
4538 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4541 fprintf_unfiltered (gdb_stdlog
,
4542 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4543 core_addr_to_string (addr
), (unsigned long) len
,
4544 core_addr_to_string (retval
));
4549 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4550 CORE_ADDR addr
, int len
, int rw
,
4551 struct expression
*cond
)
4555 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4559 fprintf_unfiltered (gdb_stdlog
,
4560 "target_can_accel_watchpoint_condition "
4561 "(%s, %d, %d, %s) = %ld\n",
4562 core_addr_to_string (addr
), len
, rw
,
4563 host_address_to_string (cond
), (unsigned long) retval
);
4568 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4572 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4574 fprintf_unfiltered (gdb_stdlog
,
4575 "target_stopped_by_watchpoint () = %ld\n",
4576 (unsigned long) retval
);
4581 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4585 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4587 fprintf_unfiltered (gdb_stdlog
,
4588 "target_stopped_data_address ([%s]) = %ld\n",
4589 core_addr_to_string (*addr
),
4590 (unsigned long)retval
);
4595 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4597 CORE_ADDR start
, int length
)
4601 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4604 fprintf_filtered (gdb_stdlog
,
4605 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4606 core_addr_to_string (addr
), core_addr_to_string (start
),
4612 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4613 struct gdbarch
*gdbarch
,
4614 struct bp_target_info
*bp_tgt
)
4618 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4621 fprintf_unfiltered (gdb_stdlog
,
4622 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4623 core_addr_to_string (bp_tgt
->placed_address
),
4624 (unsigned long) retval
);
4629 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4630 struct gdbarch
*gdbarch
,
4631 struct bp_target_info
*bp_tgt
)
4635 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4638 fprintf_unfiltered (gdb_stdlog
,
4639 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4640 core_addr_to_string (bp_tgt
->placed_address
),
4641 (unsigned long) retval
);
4646 debug_to_insert_watchpoint (struct target_ops
*self
,
4647 CORE_ADDR addr
, int len
, int type
,
4648 struct expression
*cond
)
4652 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4653 addr
, len
, type
, cond
);
4655 fprintf_unfiltered (gdb_stdlog
,
4656 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4657 core_addr_to_string (addr
), len
, type
,
4658 host_address_to_string (cond
), (unsigned long) retval
);
4663 debug_to_remove_watchpoint (struct target_ops
*self
,
4664 CORE_ADDR addr
, int len
, int type
,
4665 struct expression
*cond
)
4669 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4670 addr
, len
, type
, cond
);
4672 fprintf_unfiltered (gdb_stdlog
,
4673 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4674 core_addr_to_string (addr
), len
, type
,
4675 host_address_to_string (cond
), (unsigned long) retval
);
4680 debug_to_terminal_init (struct target_ops
*self
)
4682 debug_target
.to_terminal_init (&debug_target
);
4684 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4688 debug_to_terminal_inferior (struct target_ops
*self
)
4690 debug_target
.to_terminal_inferior (&debug_target
);
4692 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4696 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4698 debug_target
.to_terminal_ours_for_output (&debug_target
);
4700 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4704 debug_to_terminal_ours (struct target_ops
*self
)
4706 debug_target
.to_terminal_ours (&debug_target
);
4708 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4712 debug_to_terminal_save_ours (struct target_ops
*self
)
4714 debug_target
.to_terminal_save_ours (&debug_target
);
4716 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4720 debug_to_terminal_info (struct target_ops
*self
,
4721 const char *arg
, int from_tty
)
4723 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4725 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4730 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4732 debug_target
.to_load (&debug_target
, args
, from_tty
);
4734 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4738 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4740 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4742 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4743 ptid_get_pid (ptid
));
4747 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4751 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4753 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4760 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4764 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4766 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4773 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4777 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4779 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4786 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4790 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4792 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4799 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4803 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4805 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4812 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4816 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4818 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4825 debug_to_has_exited (struct target_ops
*self
,
4826 int pid
, int wait_status
, int *exit_status
)
4830 has_exited
= debug_target
.to_has_exited (&debug_target
,
4831 pid
, wait_status
, exit_status
);
4833 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4834 pid
, wait_status
, *exit_status
, has_exited
);
4840 debug_to_can_run (struct target_ops
*self
)
4844 retval
= debug_target
.to_can_run (&debug_target
);
4846 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4851 static struct gdbarch
*
4852 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4854 struct gdbarch
*retval
;
4856 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4858 fprintf_unfiltered (gdb_stdlog
,
4859 "target_thread_architecture (%s) = %s [%s]\n",
4860 target_pid_to_str (ptid
),
4861 host_address_to_string (retval
),
4862 gdbarch_bfd_arch_info (retval
)->printable_name
);
4867 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4869 debug_target
.to_stop (&debug_target
, ptid
);
4871 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4872 target_pid_to_str (ptid
));
4876 debug_to_rcmd (struct target_ops
*self
, char *command
,
4877 struct ui_file
*outbuf
)
4879 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4880 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4884 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4888 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4890 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4897 setup_target_debug (void)
4899 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4901 current_target
.to_open
= debug_to_open
;
4902 current_target
.to_post_attach
= debug_to_post_attach
;
4903 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4904 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4905 current_target
.to_files_info
= debug_to_files_info
;
4906 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4907 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4908 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4909 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4910 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4911 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4912 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4913 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4914 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4915 current_target
.to_watchpoint_addr_within_range
4916 = debug_to_watchpoint_addr_within_range
;
4917 current_target
.to_region_ok_for_hw_watchpoint
4918 = debug_to_region_ok_for_hw_watchpoint
;
4919 current_target
.to_can_accel_watchpoint_condition
4920 = debug_to_can_accel_watchpoint_condition
;
4921 current_target
.to_terminal_init
= debug_to_terminal_init
;
4922 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4923 current_target
.to_terminal_ours_for_output
4924 = debug_to_terminal_ours_for_output
;
4925 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4926 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4927 current_target
.to_terminal_info
= debug_to_terminal_info
;
4928 current_target
.to_load
= debug_to_load
;
4929 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4930 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4931 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4932 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4933 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4934 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4935 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4936 current_target
.to_has_exited
= debug_to_has_exited
;
4937 current_target
.to_can_run
= debug_to_can_run
;
4938 current_target
.to_stop
= debug_to_stop
;
4939 current_target
.to_rcmd
= debug_to_rcmd
;
4940 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4941 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
4945 static char targ_desc
[] =
4946 "Names of targets and files being debugged.\nShows the entire \
4947 stack of targets currently in use (including the exec-file,\n\
4948 core-file, and process, if any), as well as the symbol file name.";
4951 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4953 error (_("\"monitor\" command not supported by this target."));
4957 do_monitor_command (char *cmd
,
4960 target_rcmd (cmd
, gdb_stdtarg
);
4963 /* Print the name of each layers of our target stack. */
4966 maintenance_print_target_stack (char *cmd
, int from_tty
)
4968 struct target_ops
*t
;
4970 printf_filtered (_("The current target stack is:\n"));
4972 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4974 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
4978 /* Controls if async mode is permitted. */
4979 int target_async_permitted
= 0;
4981 /* The set command writes to this variable. If the inferior is
4982 executing, target_async_permitted is *not* updated. */
4983 static int target_async_permitted_1
= 0;
4986 set_target_async_command (char *args
, int from_tty
,
4987 struct cmd_list_element
*c
)
4989 if (have_live_inferiors ())
4991 target_async_permitted_1
= target_async_permitted
;
4992 error (_("Cannot change this setting while the inferior is running."));
4995 target_async_permitted
= target_async_permitted_1
;
4999 show_target_async_command (struct ui_file
*file
, int from_tty
,
5000 struct cmd_list_element
*c
,
5003 fprintf_filtered (file
,
5004 _("Controlling the inferior in "
5005 "asynchronous mode is %s.\n"), value
);
5008 /* Temporary copies of permission settings. */
5010 static int may_write_registers_1
= 1;
5011 static int may_write_memory_1
= 1;
5012 static int may_insert_breakpoints_1
= 1;
5013 static int may_insert_tracepoints_1
= 1;
5014 static int may_insert_fast_tracepoints_1
= 1;
5015 static int may_stop_1
= 1;
5017 /* Make the user-set values match the real values again. */
5020 update_target_permissions (void)
5022 may_write_registers_1
= may_write_registers
;
5023 may_write_memory_1
= may_write_memory
;
5024 may_insert_breakpoints_1
= may_insert_breakpoints
;
5025 may_insert_tracepoints_1
= may_insert_tracepoints
;
5026 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
5027 may_stop_1
= may_stop
;
5030 /* The one function handles (most of) the permission flags in the same
5034 set_target_permissions (char *args
, int from_tty
,
5035 struct cmd_list_element
*c
)
5037 if (target_has_execution
)
5039 update_target_permissions ();
5040 error (_("Cannot change this setting while the inferior is running."));
5043 /* Make the real values match the user-changed values. */
5044 may_write_registers
= may_write_registers_1
;
5045 may_insert_breakpoints
= may_insert_breakpoints_1
;
5046 may_insert_tracepoints
= may_insert_tracepoints_1
;
5047 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
5048 may_stop
= may_stop_1
;
5049 update_observer_mode ();
5052 /* Set memory write permission independently of observer mode. */
5055 set_write_memory_permission (char *args
, int from_tty
,
5056 struct cmd_list_element
*c
)
5058 /* Make the real values match the user-changed values. */
5059 may_write_memory
= may_write_memory_1
;
5060 update_observer_mode ();
5065 initialize_targets (void)
5067 init_dummy_target ();
5068 push_target (&dummy_target
);
5070 add_info ("target", target_info
, targ_desc
);
5071 add_info ("files", target_info
, targ_desc
);
5073 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
5074 Set target debugging."), _("\
5075 Show target debugging."), _("\
5076 When non-zero, target debugging is enabled. Higher numbers are more\n\
5077 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5081 &setdebuglist
, &showdebuglist
);
5083 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
5084 &trust_readonly
, _("\
5085 Set mode for reading from readonly sections."), _("\
5086 Show mode for reading from readonly sections."), _("\
5087 When this mode is on, memory reads from readonly sections (such as .text)\n\
5088 will be read from the object file instead of from the target. This will\n\
5089 result in significant performance improvement for remote targets."),
5091 show_trust_readonly
,
5092 &setlist
, &showlist
);
5094 add_com ("monitor", class_obscure
, do_monitor_command
,
5095 _("Send a command to the remote monitor (remote targets only)."));
5097 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5098 _("Print the name of each layer of the internal target stack."),
5099 &maintenanceprintlist
);
5101 add_setshow_boolean_cmd ("target-async", no_class
,
5102 &target_async_permitted_1
, _("\
5103 Set whether gdb controls the inferior in asynchronous mode."), _("\
5104 Show whether gdb controls the inferior in asynchronous mode."), _("\
5105 Tells gdb whether to control the inferior in asynchronous mode."),
5106 set_target_async_command
,
5107 show_target_async_command
,
5111 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5112 &may_write_registers_1
, _("\
5113 Set permission to write into registers."), _("\
5114 Show permission to write into registers."), _("\
5115 When this permission is on, GDB may write into the target's registers.\n\
5116 Otherwise, any sort of write attempt will result in an error."),
5117 set_target_permissions
, NULL
,
5118 &setlist
, &showlist
);
5120 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5121 &may_write_memory_1
, _("\
5122 Set permission to write into target memory."), _("\
5123 Show permission to write into target memory."), _("\
5124 When this permission is on, GDB may write into the target's memory.\n\
5125 Otherwise, any sort of write attempt will result in an error."),
5126 set_write_memory_permission
, NULL
,
5127 &setlist
, &showlist
);
5129 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5130 &may_insert_breakpoints_1
, _("\
5131 Set permission to insert breakpoints in the target."), _("\
5132 Show permission to insert breakpoints in the target."), _("\
5133 When this permission is on, GDB may insert breakpoints in the program.\n\
5134 Otherwise, any sort of insertion attempt will result in an error."),
5135 set_target_permissions
, NULL
,
5136 &setlist
, &showlist
);
5138 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5139 &may_insert_tracepoints_1
, _("\
5140 Set permission to insert tracepoints in the target."), _("\
5141 Show permission to insert tracepoints in the target."), _("\
5142 When this permission is on, GDB may insert tracepoints in the program.\n\
5143 Otherwise, any sort of insertion attempt will result in an error."),
5144 set_target_permissions
, NULL
,
5145 &setlist
, &showlist
);
5147 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5148 &may_insert_fast_tracepoints_1
, _("\
5149 Set permission to insert fast tracepoints in the target."), _("\
5150 Show permission to insert fast tracepoints in the target."), _("\
5151 When this permission is on, GDB may insert fast tracepoints.\n\
5152 Otherwise, any sort of insertion attempt will result in an error."),
5153 set_target_permissions
, NULL
,
5154 &setlist
, &showlist
);
5156 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5158 Set permission to interrupt or signal the target."), _("\
5159 Show permission to interrupt or signal the target."), _("\
5160 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5161 Otherwise, any attempt to interrupt or stop will be ignored."),
5162 set_target_permissions
, NULL
,
5163 &setlist
, &showlist
);