1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
46 static void target_info (char *, int);
48 static void default_terminal_info (char *, int);
50 static int default_watchpoint_addr_within_range (struct target_ops
*,
51 CORE_ADDR
, CORE_ADDR
, int);
53 static int default_region_ok_for_hw_watchpoint (CORE_ADDR
, int);
55 static void tcomplain (void) ATTRIBUTE_NORETURN
;
57 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
59 static int return_zero (void);
61 static int return_one (void);
63 static int return_minus_one (void);
65 void target_ignore (void);
67 static void target_command (char *, int);
69 static struct target_ops
*find_default_run_target (char *);
71 static LONGEST
default_xfer_partial (struct target_ops
*ops
,
72 enum target_object object
,
73 const char *annex
, gdb_byte
*readbuf
,
74 const gdb_byte
*writebuf
,
75 ULONGEST offset
, LONGEST len
);
77 static LONGEST
current_xfer_partial (struct target_ops
*ops
,
78 enum target_object object
,
79 const char *annex
, gdb_byte
*readbuf
,
80 const gdb_byte
*writebuf
,
81 ULONGEST offset
, LONGEST len
);
83 static LONGEST
target_xfer_partial (struct target_ops
*ops
,
84 enum target_object object
,
86 void *readbuf
, const void *writebuf
,
87 ULONGEST offset
, LONGEST len
);
89 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
92 static void init_dummy_target (void);
94 static struct target_ops debug_target
;
96 static void debug_to_open (char *, int);
98 static void debug_to_prepare_to_store (struct regcache
*);
100 static void debug_to_files_info (struct target_ops
*);
102 static int debug_to_insert_breakpoint (struct gdbarch
*,
103 struct bp_target_info
*);
105 static int debug_to_remove_breakpoint (struct gdbarch
*,
106 struct bp_target_info
*);
108 static int debug_to_can_use_hw_breakpoint (int, int, int);
110 static int debug_to_insert_hw_breakpoint (struct gdbarch
*,
111 struct bp_target_info
*);
113 static int debug_to_remove_hw_breakpoint (struct gdbarch
*,
114 struct bp_target_info
*);
116 static int debug_to_insert_watchpoint (CORE_ADDR
, int, int,
117 struct expression
*);
119 static int debug_to_remove_watchpoint (CORE_ADDR
, int, int,
120 struct expression
*);
122 static int debug_to_stopped_by_watchpoint (void);
124 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
126 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
127 CORE_ADDR
, CORE_ADDR
, int);
129 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR
, int);
131 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR
, int, int,
132 struct expression
*);
134 static void debug_to_terminal_init (void);
136 static void debug_to_terminal_inferior (void);
138 static void debug_to_terminal_ours_for_output (void);
140 static void debug_to_terminal_save_ours (void);
142 static void debug_to_terminal_ours (void);
144 static void debug_to_terminal_info (char *, int);
146 static void debug_to_load (char *, int);
148 static int debug_to_can_run (void);
150 static void debug_to_stop (ptid_t
);
152 /* Pointer to array of target architecture structures; the size of the
153 array; the current index into the array; the allocated size of the
155 struct target_ops
**target_structs
;
156 unsigned target_struct_size
;
157 unsigned target_struct_index
;
158 unsigned target_struct_allocsize
;
159 #define DEFAULT_ALLOCSIZE 10
161 /* The initial current target, so that there is always a semi-valid
164 static struct target_ops dummy_target
;
166 /* Top of target stack. */
168 static struct target_ops
*target_stack
;
170 /* The target structure we are currently using to talk to a process
171 or file or whatever "inferior" we have. */
173 struct target_ops current_target
;
175 /* Command list for target. */
177 static struct cmd_list_element
*targetlist
= NULL
;
179 /* Nonzero if we should trust readonly sections from the
180 executable when reading memory. */
182 static int trust_readonly
= 0;
184 /* Nonzero if we should show true memory content including
185 memory breakpoint inserted by gdb. */
187 static int show_memory_breakpoints
= 0;
189 /* These globals control whether GDB attempts to perform these
190 operations; they are useful for targets that need to prevent
191 inadvertant disruption, such as in non-stop mode. */
193 int may_write_registers
= 1;
195 int may_write_memory
= 1;
197 int may_insert_breakpoints
= 1;
199 int may_insert_tracepoints
= 1;
201 int may_insert_fast_tracepoints
= 1;
205 /* Non-zero if we want to see trace of target level stuff. */
207 static int targetdebug
= 0;
/* Implement the "show debug target" command: report the current value
   of the targetdebug flag.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
215 static void setup_target_debug (void);
217 /* The option sets this. */
218 static int stack_cache_enabled_p_1
= 1;
219 /* And set_stack_cache_enabled_p updates this.
220 The reason for the separation is so that we don't flush the cache for
221 on->on transitions. */
222 static int stack_cache_enabled_p
= 1;
224 /* This is called *after* the stack-cache has been set.
225 Flush the cache for off->on and on->off transitions.
226 There's no real need to flush the cache for on->off transitions,
227 except cleanliness. */
230 set_stack_cache_enabled_p (char *args
, int from_tty
,
231 struct cmd_list_element
*c
)
233 if (stack_cache_enabled_p
!= stack_cache_enabled_p_1
)
234 target_dcache_invalidate ();
236 stack_cache_enabled_p
= stack_cache_enabled_p_1
;
/* Implement the "show stack-cache" command: report whether the stack
   cache is in use.  */

static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
246 /* Cache of memory operations, to speed up remote access. */
247 static DCACHE
*target_dcache
;
249 /* Invalidate the target dcache. */
252 target_dcache_invalidate (void)
254 dcache_invalidate (target_dcache
);
257 /* The user just typed 'target' without the name of a target. */
260 target_command (char *arg
, int from_tty
)
262 fputs_filtered ("Argument required (target name). Try `help target'\n",
266 /* Default target_has_* methods for process_stratum targets. */
269 default_child_has_all_memory (struct target_ops
*ops
)
271 /* If no inferior selected, then we can't read memory here. */
272 if (ptid_equal (inferior_ptid
, null_ptid
))
279 default_child_has_memory (struct target_ops
*ops
)
281 /* If no inferior selected, then we can't read memory here. */
282 if (ptid_equal (inferior_ptid
, null_ptid
))
289 default_child_has_stack (struct target_ops
*ops
)
291 /* If no inferior selected, there's no stack. */
292 if (ptid_equal (inferior_ptid
, null_ptid
))
299 default_child_has_registers (struct target_ops
*ops
)
301 /* Can't read registers from no inferior. */
302 if (ptid_equal (inferior_ptid
, null_ptid
))
309 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
311 /* If there's no thread selected, then we can't make it run through
313 if (ptid_equal (the_ptid
, null_ptid
))
321 target_has_all_memory_1 (void)
323 struct target_ops
*t
;
325 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
326 if (t
->to_has_all_memory (t
))
333 target_has_memory_1 (void)
335 struct target_ops
*t
;
337 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
338 if (t
->to_has_memory (t
))
345 target_has_stack_1 (void)
347 struct target_ops
*t
;
349 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
350 if (t
->to_has_stack (t
))
357 target_has_registers_1 (void)
359 struct target_ops
*t
;
361 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
362 if (t
->to_has_registers (t
))
369 target_has_execution_1 (ptid_t the_ptid
)
371 struct target_ops
*t
;
373 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
374 if (t
->to_has_execution (t
, the_ptid
))
381 target_has_execution_current (void)
383 return target_has_execution_1 (inferior_ptid
);
386 /* Add a possible target architecture to the list. */
389 add_target (struct target_ops
*t
)
391 /* Provide default values for all "must have" methods. */
392 if (t
->to_xfer_partial
== NULL
)
393 t
->to_xfer_partial
= default_xfer_partial
;
395 if (t
->to_has_all_memory
== NULL
)
396 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
398 if (t
->to_has_memory
== NULL
)
399 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
401 if (t
->to_has_stack
== NULL
)
402 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
404 if (t
->to_has_registers
== NULL
)
405 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
407 if (t
->to_has_execution
== NULL
)
408 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
412 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
413 target_structs
= (struct target_ops
**) xmalloc
414 (target_struct_allocsize
* sizeof (*target_structs
));
416 if (target_struct_size
>= target_struct_allocsize
)
418 target_struct_allocsize
*= 2;
419 target_structs
= (struct target_ops
**)
420 xrealloc ((char *) target_structs
,
421 target_struct_allocsize
* sizeof (*target_structs
));
423 target_structs
[target_struct_size
++] = t
;
425 if (targetlist
== NULL
)
426 add_prefix_cmd ("target", class_run
, target_command
, _("\
427 Connect to a target machine or process.\n\
428 The first argument is the type or protocol of the target machine.\n\
429 Remaining arguments are interpreted by the target protocol. For more\n\
430 information on the arguments for a particular protocol, type\n\
431 `help target ' followed by the protocol name."),
432 &targetlist
, "target ", 0, &cmdlist
);
433 add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
446 struct target_ops
*t
;
448 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
449 if (t
->to_kill
!= NULL
)
452 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
462 target_load (char *arg
, int from_tty
)
464 target_dcache_invalidate ();
465 (*current_target
.to_load
) (arg
, from_tty
);
469 target_create_inferior (char *exec_file
, char *args
,
470 char **env
, int from_tty
)
472 struct target_ops
*t
;
474 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
476 if (t
->to_create_inferior
!= NULL
)
478 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
480 fprintf_unfiltered (gdb_stdlog
,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file
, args
, from_tty
);
487 internal_error (__FILE__
, __LINE__
,
488 _("could not find a target to create inferior"));
492 target_terminal_inferior (void)
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution
)
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target
.to_terminal_inferior
) ();
507 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
508 struct target_ops
*t
)
510 errno
= EIO
; /* Can't read/write this location. */
511 return 0; /* No bytes handled. */
517 error (_("You can't do that when your target is `%s'"),
518 current_target
.to_shortname
);
/* Error out for operations that require a live process when there is
   none.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
533 /* A default implementation for the to_get_ada_task_ptid target method.
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
540 default_get_ada_task_ptid (long lwp
, long tid
)
542 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
545 static enum exec_direction_kind
546 default_execution_direction (void)
548 if (!target_can_execute_reverse
)
550 else if (!target_can_async_p ())
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
570 update_current_target (void)
572 struct target_ops
*t
;
574 /* First, reset current's contents. */
575 memset (¤t_target
, 0, sizeof (current_target
));
577 #define INHERIT(FIELD, TARGET) \
578 if (!current_target.FIELD) \
579 current_target.FIELD = (TARGET)->FIELD
581 for (t
= target_stack
; t
; t
= t
->beneath
)
583 INHERIT (to_shortname
, t
);
584 INHERIT (to_longname
, t
);
586 /* Do not inherit to_open. */
587 /* Do not inherit to_close. */
588 /* Do not inherit to_attach. */
589 INHERIT (to_post_attach
, t
);
590 INHERIT (to_attach_no_wait
, t
);
591 /* Do not inherit to_detach. */
592 /* Do not inherit to_disconnect. */
593 /* Do not inherit to_resume. */
594 /* Do not inherit to_wait. */
595 /* Do not inherit to_fetch_registers. */
596 /* Do not inherit to_store_registers. */
597 INHERIT (to_prepare_to_store
, t
);
598 INHERIT (deprecated_xfer_memory
, t
);
599 INHERIT (to_files_info
, t
);
600 INHERIT (to_insert_breakpoint
, t
);
601 INHERIT (to_remove_breakpoint
, t
);
602 INHERIT (to_can_use_hw_breakpoint
, t
);
603 INHERIT (to_insert_hw_breakpoint
, t
);
604 INHERIT (to_remove_hw_breakpoint
, t
);
605 /* Do not inherit to_ranged_break_num_registers. */
606 INHERIT (to_insert_watchpoint
, t
);
607 INHERIT (to_remove_watchpoint
, t
);
608 /* Do not inherit to_insert_mask_watchpoint. */
609 /* Do not inherit to_remove_mask_watchpoint. */
610 INHERIT (to_stopped_data_address
, t
);
611 INHERIT (to_have_steppable_watchpoint
, t
);
612 INHERIT (to_have_continuable_watchpoint
, t
);
613 INHERIT (to_stopped_by_watchpoint
, t
);
614 INHERIT (to_watchpoint_addr_within_range
, t
);
615 INHERIT (to_region_ok_for_hw_watchpoint
, t
);
616 INHERIT (to_can_accel_watchpoint_condition
, t
);
617 /* Do not inherit to_masked_watch_num_registers. */
618 INHERIT (to_terminal_init
, t
);
619 INHERIT (to_terminal_inferior
, t
);
620 INHERIT (to_terminal_ours_for_output
, t
);
621 INHERIT (to_terminal_ours
, t
);
622 INHERIT (to_terminal_save_ours
, t
);
623 INHERIT (to_terminal_info
, t
);
624 /* Do not inherit to_kill. */
625 INHERIT (to_load
, t
);
626 /* Do no inherit to_create_inferior. */
627 INHERIT (to_post_startup_inferior
, t
);
628 INHERIT (to_insert_fork_catchpoint
, t
);
629 INHERIT (to_remove_fork_catchpoint
, t
);
630 INHERIT (to_insert_vfork_catchpoint
, t
);
631 INHERIT (to_remove_vfork_catchpoint
, t
);
632 /* Do not inherit to_follow_fork. */
633 INHERIT (to_insert_exec_catchpoint
, t
);
634 INHERIT (to_remove_exec_catchpoint
, t
);
635 INHERIT (to_set_syscall_catchpoint
, t
);
636 INHERIT (to_has_exited
, t
);
637 /* Do not inherit to_mourn_inferior. */
638 INHERIT (to_can_run
, t
);
639 /* Do not inherit to_pass_signals. */
640 /* Do not inherit to_thread_alive. */
641 /* Do not inherit to_find_new_threads. */
642 /* Do not inherit to_pid_to_str. */
643 INHERIT (to_extra_thread_info
, t
);
644 INHERIT (to_thread_name
, t
);
645 INHERIT (to_stop
, t
);
646 /* Do not inherit to_xfer_partial. */
647 INHERIT (to_rcmd
, t
);
648 INHERIT (to_pid_to_exec_file
, t
);
649 INHERIT (to_log_command
, t
);
650 INHERIT (to_stratum
, t
);
651 /* Do not inherit to_has_all_memory. */
652 /* Do not inherit to_has_memory. */
653 /* Do not inherit to_has_stack. */
654 /* Do not inherit to_has_registers. */
655 /* Do not inherit to_has_execution. */
656 INHERIT (to_has_thread_control
, t
);
657 INHERIT (to_can_async_p
, t
);
658 INHERIT (to_is_async_p
, t
);
659 INHERIT (to_async
, t
);
660 INHERIT (to_find_memory_regions
, t
);
661 INHERIT (to_make_corefile_notes
, t
);
662 INHERIT (to_get_bookmark
, t
);
663 INHERIT (to_goto_bookmark
, t
);
664 /* Do not inherit to_get_thread_local_address. */
665 INHERIT (to_can_execute_reverse
, t
);
666 INHERIT (to_execution_direction
, t
);
667 INHERIT (to_thread_architecture
, t
);
668 /* Do not inherit to_read_description. */
669 INHERIT (to_get_ada_task_ptid
, t
);
670 /* Do not inherit to_search_memory. */
671 INHERIT (to_supports_multi_process
, t
);
672 INHERIT (to_supports_enable_disable_tracepoint
, t
);
673 INHERIT (to_supports_string_tracing
, t
);
674 INHERIT (to_trace_init
, t
);
675 INHERIT (to_download_tracepoint
, t
);
676 INHERIT (to_can_download_tracepoint
, t
);
677 INHERIT (to_download_trace_state_variable
, t
);
678 INHERIT (to_enable_tracepoint
, t
);
679 INHERIT (to_disable_tracepoint
, t
);
680 INHERIT (to_trace_set_readonly_regions
, t
);
681 INHERIT (to_trace_start
, t
);
682 INHERIT (to_get_trace_status
, t
);
683 INHERIT (to_get_tracepoint_status
, t
);
684 INHERIT (to_trace_stop
, t
);
685 INHERIT (to_trace_find
, t
);
686 INHERIT (to_get_trace_state_variable_value
, t
);
687 INHERIT (to_save_trace_data
, t
);
688 INHERIT (to_upload_tracepoints
, t
);
689 INHERIT (to_upload_trace_state_variables
, t
);
690 INHERIT (to_get_raw_trace_data
, t
);
691 INHERIT (to_get_min_fast_tracepoint_insn_len
, t
);
692 INHERIT (to_set_disconnected_tracing
, t
);
693 INHERIT (to_set_circular_trace_buffer
, t
);
694 INHERIT (to_set_trace_notes
, t
);
695 INHERIT (to_get_tib_address
, t
);
696 INHERIT (to_set_permissions
, t
);
697 INHERIT (to_static_tracepoint_marker_at
, t
);
698 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
699 INHERIT (to_traceframe_info
, t
);
700 INHERIT (to_magic
, t
);
701 /* Do not inherit to_memory_map. */
702 /* Do not inherit to_flash_erase. */
703 /* Do not inherit to_flash_done. */
707 /* Clean up a target struct so it no longer has any zero pointers in
708 it. Some entries are defaulted to a method that print an error,
709 others are hard-wired to a standard recursive default. */
711 #define de_fault(field, value) \
712 if (!current_target.field) \
713 current_target.field = value
716 (void (*) (char *, int))
721 de_fault (to_post_attach
,
724 de_fault (to_prepare_to_store
,
725 (void (*) (struct regcache
*))
727 de_fault (deprecated_xfer_memory
,
728 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
729 struct mem_attrib
*, struct target_ops
*))
731 de_fault (to_files_info
,
732 (void (*) (struct target_ops
*))
734 de_fault (to_insert_breakpoint
,
735 memory_insert_breakpoint
);
736 de_fault (to_remove_breakpoint
,
737 memory_remove_breakpoint
);
738 de_fault (to_can_use_hw_breakpoint
,
739 (int (*) (int, int, int))
741 de_fault (to_insert_hw_breakpoint
,
742 (int (*) (struct gdbarch
*, struct bp_target_info
*))
744 de_fault (to_remove_hw_breakpoint
,
745 (int (*) (struct gdbarch
*, struct bp_target_info
*))
747 de_fault (to_insert_watchpoint
,
748 (int (*) (CORE_ADDR
, int, int, struct expression
*))
750 de_fault (to_remove_watchpoint
,
751 (int (*) (CORE_ADDR
, int, int, struct expression
*))
753 de_fault (to_stopped_by_watchpoint
,
756 de_fault (to_stopped_data_address
,
757 (int (*) (struct target_ops
*, CORE_ADDR
*))
759 de_fault (to_watchpoint_addr_within_range
,
760 default_watchpoint_addr_within_range
);
761 de_fault (to_region_ok_for_hw_watchpoint
,
762 default_region_ok_for_hw_watchpoint
);
763 de_fault (to_can_accel_watchpoint_condition
,
764 (int (*) (CORE_ADDR
, int, int, struct expression
*))
766 de_fault (to_terminal_init
,
769 de_fault (to_terminal_inferior
,
772 de_fault (to_terminal_ours_for_output
,
775 de_fault (to_terminal_ours
,
778 de_fault (to_terminal_save_ours
,
781 de_fault (to_terminal_info
,
782 default_terminal_info
);
784 (void (*) (char *, int))
786 de_fault (to_post_startup_inferior
,
789 de_fault (to_insert_fork_catchpoint
,
792 de_fault (to_remove_fork_catchpoint
,
795 de_fault (to_insert_vfork_catchpoint
,
798 de_fault (to_remove_vfork_catchpoint
,
801 de_fault (to_insert_exec_catchpoint
,
804 de_fault (to_remove_exec_catchpoint
,
807 de_fault (to_set_syscall_catchpoint
,
808 (int (*) (int, int, int, int, int *))
810 de_fault (to_has_exited
,
811 (int (*) (int, int, int *))
813 de_fault (to_can_run
,
815 de_fault (to_extra_thread_info
,
816 (char *(*) (struct thread_info
*))
818 de_fault (to_thread_name
,
819 (char *(*) (struct thread_info
*))
824 current_target
.to_xfer_partial
= current_xfer_partial
;
826 (void (*) (char *, struct ui_file
*))
828 de_fault (to_pid_to_exec_file
,
832 (void (*) (void (*) (enum inferior_event_type
, void*), void*))
834 de_fault (to_thread_architecture
,
835 default_thread_architecture
);
836 current_target
.to_read_description
= NULL
;
837 de_fault (to_get_ada_task_ptid
,
838 (ptid_t (*) (long, long))
839 default_get_ada_task_ptid
);
840 de_fault (to_supports_multi_process
,
843 de_fault (to_supports_enable_disable_tracepoint
,
846 de_fault (to_supports_string_tracing
,
849 de_fault (to_trace_init
,
852 de_fault (to_download_tracepoint
,
853 (void (*) (struct bp_location
*))
855 de_fault (to_can_download_tracepoint
,
858 de_fault (to_download_trace_state_variable
,
859 (void (*) (struct trace_state_variable
*))
861 de_fault (to_enable_tracepoint
,
862 (void (*) (struct bp_location
*))
864 de_fault (to_disable_tracepoint
,
865 (void (*) (struct bp_location
*))
867 de_fault (to_trace_set_readonly_regions
,
870 de_fault (to_trace_start
,
873 de_fault (to_get_trace_status
,
874 (int (*) (struct trace_status
*))
876 de_fault (to_get_tracepoint_status
,
877 (void (*) (struct breakpoint
*, struct uploaded_tp
*))
879 de_fault (to_trace_stop
,
882 de_fault (to_trace_find
,
883 (int (*) (enum trace_find_type
, int, ULONGEST
, ULONGEST
, int *))
885 de_fault (to_get_trace_state_variable_value
,
886 (int (*) (int, LONGEST
*))
888 de_fault (to_save_trace_data
,
889 (int (*) (const char *))
891 de_fault (to_upload_tracepoints
,
892 (int (*) (struct uploaded_tp
**))
894 de_fault (to_upload_trace_state_variables
,
895 (int (*) (struct uploaded_tsv
**))
897 de_fault (to_get_raw_trace_data
,
898 (LONGEST (*) (gdb_byte
*, ULONGEST
, LONGEST
))
900 de_fault (to_get_min_fast_tracepoint_insn_len
,
903 de_fault (to_set_disconnected_tracing
,
906 de_fault (to_set_circular_trace_buffer
,
909 de_fault (to_set_trace_notes
,
910 (int (*) (char *, char *, char *))
912 de_fault (to_get_tib_address
,
913 (int (*) (ptid_t
, CORE_ADDR
*))
915 de_fault (to_set_permissions
,
918 de_fault (to_static_tracepoint_marker_at
,
919 (int (*) (CORE_ADDR
, struct static_tracepoint_marker
*))
921 de_fault (to_static_tracepoint_markers_by_strid
,
922 (VEC(static_tracepoint_marker_p
) * (*) (const char *))
924 de_fault (to_traceframe_info
,
925 (struct traceframe_info
* (*) (void))
927 de_fault (to_execution_direction
, default_execution_direction
);
931 /* Finally, position the target-stack beneath the squashed
932 "current_target". That way code looking for a non-inherited
933 target method can quickly and simply find it. */
934 current_target
.beneath
= target_stack
;
937 setup_target_debug ();
940 /* Push a new target type into the stack of the existing target accessors,
941 possibly superseding some of the existing accessors.
943 Rather than allow an empty stack, we always have the dummy target at
944 the bottom stratum, so we can call the function vectors without
948 push_target (struct target_ops
*t
)
950 struct target_ops
**cur
;
952 /* Check magic number. If wrong, it probably means someone changed
953 the struct definition, but not all the places that initialize one. */
954 if (t
->to_magic
!= OPS_MAGIC
)
956 fprintf_unfiltered (gdb_stderr
,
957 "Magic number of %s target struct wrong\n",
959 internal_error (__FILE__
, __LINE__
,
960 _("failed internal consistency check"));
963 /* Find the proper stratum to install this target in. */
964 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
966 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
970 /* If there's already targets at this stratum, remove them. */
971 /* FIXME: cagney/2003-10-15: I think this should be popping all
972 targets to CUR, and not just those at this stratum level. */
973 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
975 /* There's already something at this stratum level. Close it,
976 and un-hook it from the stack. */
977 struct target_ops
*tmp
= (*cur
);
979 (*cur
) = (*cur
)->beneath
;
981 target_close (tmp
, 0);
984 /* We have removed all targets in our stratum, now add the new one. */
988 update_current_target ();
991 /* Remove a target_ops vector from the stack, wherever it may be.
992 Return how many times it was removed (0 or 1). */
995 unpush_target (struct target_ops
*t
)
997 struct target_ops
**cur
;
998 struct target_ops
*tmp
;
1000 if (t
->to_stratum
== dummy_stratum
)
1001 internal_error (__FILE__
, __LINE__
,
1002 _("Attempt to unpush the dummy target"));
1004 /* Look for the specified target. Note that we assume that a target
1005 can only occur once in the target stack. */
1007 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1013 /* If we don't find target_ops, quit. Only open targets should be
1018 /* Unchain the target. */
1020 (*cur
) = (*cur
)->beneath
;
1021 tmp
->beneath
= NULL
;
1023 update_current_target ();
1025 /* Finally close the target. Note we do this after unchaining, so
1026 any target method calls from within the target_close
1027 implementation don't end up in T anymore. */
1028 target_close (t
, 0);
1036 target_close (target_stack
, 0); /* Let it clean up. */
1037 if (unpush_target (target_stack
) == 1)
1040 fprintf_unfiltered (gdb_stderr
,
1041 "pop_target couldn't find target %s\n",
1042 current_target
.to_shortname
);
1043 internal_error (__FILE__
, __LINE__
,
1044 _("failed internal consistency check"));
1048 pop_all_targets_above (enum strata above_stratum
, int quitting
)
1050 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
1052 target_close (target_stack
, quitting
);
1053 if (!unpush_target (target_stack
))
1055 fprintf_unfiltered (gdb_stderr
,
1056 "pop_all_targets couldn't find target %s\n",
1057 target_stack
->to_shortname
);
1058 internal_error (__FILE__
, __LINE__
,
1059 _("failed internal consistency check"));
1066 pop_all_targets (int quitting
)
1068 pop_all_targets_above (dummy_stratum
, quitting
);
1071 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1074 target_is_pushed (struct target_ops
*t
)
1076 struct target_ops
**cur
;
1078 /* Check magic number. If wrong, it probably means someone changed
1079 the struct definition, but not all the places that initialize one. */
1080 if (t
->to_magic
!= OPS_MAGIC
)
1082 fprintf_unfiltered (gdb_stderr
,
1083 "Magic number of %s target struct wrong\n",
1085 internal_error (__FILE__
, __LINE__
,
1086 _("failed internal consistency check"));
1089 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
1096 /* Using the objfile specified in OBJFILE, find the address for the
1097 current thread's thread-local storage with offset OFFSET. */
1099 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
1101 volatile CORE_ADDR addr
= 0;
1102 struct target_ops
*target
;
1104 for (target
= current_target
.beneath
;
1106 target
= target
->beneath
)
1108 if (target
->to_get_thread_local_address
!= NULL
)
1113 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch
))
1115 ptid_t ptid
= inferior_ptid
;
1116 volatile struct gdb_exception ex
;
1118 TRY_CATCH (ex
, RETURN_MASK_ALL
)
1122 /* Fetch the load module address for this objfile. */
1123 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch
,
1125 /* If it's 0, throw the appropriate exception. */
1127 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
1128 _("TLS load module not found"));
1130 addr
= target
->to_get_thread_local_address (target
, ptid
,
1133 /* If an error occurred, print TLS related messages here. Otherwise,
1134 throw the error to some higher catcher. */
1137 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
1141 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
1142 error (_("Cannot find thread-local variables "
1143 "in this thread library."));
1145 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
1146 if (objfile_is_library
)
1147 error (_("Cannot find shared library `%s' in dynamic"
1148 " linker's load module list"), objfile
->name
);
1150 error (_("Cannot find executable file `%s' in dynamic"
1151 " linker's load module list"), objfile
->name
);
1153 case TLS_NOT_ALLOCATED_YET_ERROR
:
1154 if (objfile_is_library
)
1155 error (_("The inferior has not yet allocated storage for"
1156 " thread-local variables in\n"
1157 "the shared library `%s'\n"
1159 objfile
->name
, target_pid_to_str (ptid
));
1161 error (_("The inferior has not yet allocated storage for"
1162 " thread-local variables in\n"
1163 "the executable `%s'\n"
1165 objfile
->name
, target_pid_to_str (ptid
));
1167 case TLS_GENERIC_ERROR
:
1168 if (objfile_is_library
)
1169 error (_("Cannot find thread-local storage for %s, "
1170 "shared library %s:\n%s"),
1171 target_pid_to_str (ptid
),
1172 objfile
->name
, ex
.message
);
1174 error (_("Cannot find thread-local storage for %s, "
1175 "executable file %s:\n%s"),
1176 target_pid_to_str (ptid
),
1177 objfile
->name
, ex
.message
);
1180 throw_exception (ex
);
1185 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1186 TLS is an ABI-specific thing. But we don't do that yet. */
1188 error (_("Cannot find thread-local variables on this target"));
1194 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1196 /* target_read_string -- read a null terminated string, up to LEN bytes,
1197 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1198 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1199 is responsible for freeing it. Return the number of bytes successfully
1203 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1205 int tlen
, origlen
, offset
, i
;
1209 int buffer_allocated
;
1211 unsigned int nbytes_read
= 0;
1213 gdb_assert (string
);
1215 /* Small for testing. */
1216 buffer_allocated
= 4;
1217 buffer
= xmalloc (buffer_allocated
);
1224 tlen
= MIN (len
, 4 - (memaddr
& 3));
1225 offset
= memaddr
& 3;
1227 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1230 /* The transfer request might have crossed the boundary to an
1231 unallocated region of memory. Retry the transfer, requesting
1235 errcode
= target_read_memory (memaddr
, buf
, 1);
1240 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1244 bytes
= bufptr
- buffer
;
1245 buffer_allocated
*= 2;
1246 buffer
= xrealloc (buffer
, buffer_allocated
);
1247 bufptr
= buffer
+ bytes
;
1250 for (i
= 0; i
< tlen
; i
++)
1252 *bufptr
++ = buf
[i
+ offset
];
1253 if (buf
[i
+ offset
] == '\000')
1255 nbytes_read
+= i
+ 1;
1262 nbytes_read
+= tlen
;
1271 struct target_section_table
*
1272 target_get_section_table (struct target_ops
*target
)
1274 struct target_ops
*t
;
1277 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1279 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1280 if (t
->to_get_section_table
!= NULL
)
1281 return (*t
->to_get_section_table
) (t
);
1286 /* Find a section containing ADDR. */
1288 struct target_section
*
1289 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1291 struct target_section_table
*table
= target_get_section_table (target
);
1292 struct target_section
*secp
;
1297 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1299 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1305 /* Read memory from the live target, even if currently inspecting a
1306 traceframe. The return is the same as that of target_read. */
1309 target_read_live_memory (enum target_object object
,
1310 ULONGEST memaddr
, gdb_byte
*myaddr
, LONGEST len
)
1313 struct cleanup
*cleanup
;
1315 /* Switch momentarily out of tfind mode so to access live memory.
1316 Note that this must not clear global state, such as the frame
1317 cache, which must still remain valid for the previous traceframe.
1318 We may be _building_ the frame cache at this point. */
1319 cleanup
= make_cleanup_restore_traceframe_number ();
1320 set_traceframe_number (-1);
1322 ret
= target_read (current_target
.beneath
, object
, NULL
,
1323 myaddr
, memaddr
, len
);
1325 do_cleanups (cleanup
);
1329 /* Using the set of read-only target sections of OPS, read live
1330 read-only memory. Note that the actual reads start from the
1331 top-most target again.
1333 For interface/parameters/return description see target.h,
1337 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1338 enum target_object object
,
1339 gdb_byte
*readbuf
, ULONGEST memaddr
,
1342 struct target_section
*secp
;
1343 struct target_section_table
*table
;
1345 secp
= target_section_by_addr (ops
, memaddr
);
1347 && (bfd_get_section_flags (secp
->bfd
, secp
->the_bfd_section
)
1350 struct target_section
*p
;
1351 ULONGEST memend
= memaddr
+ len
;
1353 table
= target_get_section_table (ops
);
1355 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1357 if (memaddr
>= p
->addr
)
1359 if (memend
<= p
->endaddr
)
1361 /* Entire transfer is within this section. */
1362 return target_read_live_memory (object
, memaddr
,
1365 else if (memaddr
>= p
->endaddr
)
1367 /* This section ends before the transfer starts. */
1372 /* This section overlaps the transfer. Just do half. */
1373 len
= p
->endaddr
- memaddr
;
1374 return target_read_live_memory (object
, memaddr
,
1384 /* Perform a partial memory transfer.
1385 For docs see target.h, to_xfer_partial. */
1388 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1389 void *readbuf
, const void *writebuf
, ULONGEST memaddr
,
1394 struct mem_region
*region
;
1395 struct inferior
*inf
;
1397 /* For accesses to unmapped overlay sections, read directly from
1398 files. Must do this first, as MEMADDR may need adjustment. */
1399 if (readbuf
!= NULL
&& overlay_debugging
)
1401 struct obj_section
*section
= find_pc_overlay (memaddr
);
1403 if (pc_in_unmapped_range (memaddr
, section
))
1405 struct target_section_table
*table
1406 = target_get_section_table (ops
);
1407 const char *section_name
= section
->the_bfd_section
->name
;
1409 memaddr
= overlay_mapped_address (memaddr
, section
);
1410 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1413 table
->sections_end
,
1418 /* Try the executable files, if "trust-readonly-sections" is set. */
1419 if (readbuf
!= NULL
&& trust_readonly
)
1421 struct target_section
*secp
;
1422 struct target_section_table
*table
;
1424 secp
= target_section_by_addr (ops
, memaddr
);
1426 && (bfd_get_section_flags (secp
->bfd
, secp
->the_bfd_section
)
1429 table
= target_get_section_table (ops
);
1430 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1433 table
->sections_end
,
1438 /* If reading unavailable memory in the context of traceframes, and
1439 this address falls within a read-only section, fallback to
1440 reading from live memory. */
1441 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1443 VEC(mem_range_s
) *available
;
1445 /* If we fail to get the set of available memory, then the
1446 target does not support querying traceframe info, and so we
1447 attempt reading from the traceframe anyway (assuming the
1448 target implements the old QTro packet then). */
1449 if (traceframe_available_memory (&available
, memaddr
, len
))
1451 struct cleanup
*old_chain
;
1453 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1455 if (VEC_empty (mem_range_s
, available
)
1456 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1458 /* Don't read into the traceframe's available
1460 if (!VEC_empty (mem_range_s
, available
))
1462 LONGEST oldlen
= len
;
1464 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1465 gdb_assert (len
<= oldlen
);
1468 do_cleanups (old_chain
);
1470 /* This goes through the topmost target again. */
1471 res
= memory_xfer_live_readonly_partial (ops
, object
,
1472 readbuf
, memaddr
, len
);
1476 /* No use trying further, we know some memory starting
1477 at MEMADDR isn't available. */
1481 /* Don't try to read more than how much is available, in
1482 case the target implements the deprecated QTro packet to
1483 cater for older GDBs (the target's knowledge of read-only
1484 sections may be outdated by now). */
1485 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1487 do_cleanups (old_chain
);
1491 /* Try GDB's internal data cache. */
1492 region
= lookup_mem_region (memaddr
);
1493 /* region->hi == 0 means there's no upper bound. */
1494 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1497 reg_len
= region
->hi
- memaddr
;
1499 switch (region
->attrib
.mode
)
1502 if (writebuf
!= NULL
)
1507 if (readbuf
!= NULL
)
1512 /* We only support writing to flash during "load" for now. */
1513 if (writebuf
!= NULL
)
1514 error (_("Writing to flash memory forbidden in this context"));
1521 if (!ptid_equal (inferior_ptid
, null_ptid
))
1522 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1527 /* The dcache reads whole cache lines; that doesn't play well
1528 with reading from a trace buffer, because reading outside of
1529 the collected memory range fails. */
1530 && get_traceframe_number () == -1
1531 && (region
->attrib
.cache
1532 || (stack_cache_enabled_p
&& object
== TARGET_OBJECT_STACK_MEMORY
)))
1534 if (readbuf
!= NULL
)
1535 res
= dcache_xfer_memory (ops
, target_dcache
, memaddr
, readbuf
,
1538 /* FIXME drow/2006-08-09: If we're going to preserve const
1539 correctness dcache_xfer_memory should take readbuf and
1541 res
= dcache_xfer_memory (ops
, target_dcache
, memaddr
,
1550 /* If none of those methods found the memory we wanted, fall back
1551 to a target partial transfer. Normally a single call to
1552 to_xfer_partial is enough; if it doesn't recognize an object
1553 it will call the to_xfer_partial of the next target down.
1554 But for memory this won't do. Memory is the only target
1555 object which can be read from more than one valid target.
1556 A core file, for instance, could have some of memory but
1557 delegate other bits to the target below it. So, we must
1558 manually try all targets. */
1562 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1563 readbuf
, writebuf
, memaddr
, reg_len
);
1567 /* We want to continue past core files to executables, but not
1568 past a running target's memory. */
1569 if (ops
->to_has_all_memory (ops
))
1574 while (ops
!= NULL
);
1576 /* Make sure the cache gets updated no matter what - if we are writing
1577 to the stack. Even if this write is not tagged as such, we still need
1578 to update the cache. */
1583 && !region
->attrib
.cache
1584 && stack_cache_enabled_p
1585 && object
!= TARGET_OBJECT_STACK_MEMORY
)
1587 dcache_update (target_dcache
, memaddr
, (void *) writebuf
, res
);
1590 /* If we still haven't got anything, return the last error. We
1595 /* Perform a partial memory transfer. For docs see target.h,
1599 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1600 void *readbuf
, const void *writebuf
, ULONGEST memaddr
,
1605 /* Zero length requests are ok and require no work. */
1609 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1610 breakpoint insns, thus hiding out from higher layers whether
1611 there are software breakpoints inserted in the code stream. */
1612 if (readbuf
!= NULL
)
1614 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
);
1616 if (res
> 0 && !show_memory_breakpoints
)
1617 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1622 struct cleanup
*old_chain
;
1624 buf
= xmalloc (len
);
1625 old_chain
= make_cleanup (xfree
, buf
);
1626 memcpy (buf
, writebuf
, len
);
1628 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1629 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
);
1631 do_cleanups (old_chain
);
1638 restore_show_memory_breakpoints (void *arg
)
1640 show_memory_breakpoints
= (uintptr_t) arg
;
1644 make_show_memory_breakpoints_cleanup (int show
)
1646 int current
= show_memory_breakpoints
;
1648 show_memory_breakpoints
= show
;
1649 return make_cleanup (restore_show_memory_breakpoints
,
1650 (void *) (uintptr_t) current
);
1653 /* For docs see target.h, to_xfer_partial. */
1656 target_xfer_partial (struct target_ops
*ops
,
1657 enum target_object object
, const char *annex
,
1658 void *readbuf
, const void *writebuf
,
1659 ULONGEST offset
, LONGEST len
)
1663 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1665 if (writebuf
&& !may_write_memory
)
1666 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1667 core_addr_to_string_nz (offset
), plongest (len
));
1669 /* If this is a memory transfer, let the memory-specific code
1670 have a look at it instead. Memory transfers are more
1672 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
)
1673 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1674 writebuf
, offset
, len
);
1677 enum target_object raw_object
= object
;
1679 /* If this is a raw memory transfer, request the normal
1680 memory object from other layers. */
1681 if (raw_object
== TARGET_OBJECT_RAW_MEMORY
)
1682 raw_object
= TARGET_OBJECT_MEMORY
;
1684 retval
= ops
->to_xfer_partial (ops
, raw_object
, annex
, readbuf
,
1685 writebuf
, offset
, len
);
1690 const unsigned char *myaddr
= NULL
;
1692 fprintf_unfiltered (gdb_stdlog
,
1693 "%s:target_xfer_partial "
1694 "(%d, %s, %s, %s, %s, %s) = %s",
1697 (annex
? annex
: "(null)"),
1698 host_address_to_string (readbuf
),
1699 host_address_to_string (writebuf
),
1700 core_addr_to_string_nz (offset
),
1701 plongest (len
), plongest (retval
));
1707 if (retval
> 0 && myaddr
!= NULL
)
1711 fputs_unfiltered (", bytes =", gdb_stdlog
);
1712 for (i
= 0; i
< retval
; i
++)
1714 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1716 if (targetdebug
< 2 && i
> 0)
1718 fprintf_unfiltered (gdb_stdlog
, " ...");
1721 fprintf_unfiltered (gdb_stdlog
, "\n");
1724 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1728 fputc_unfiltered ('\n', gdb_stdlog
);
1733 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1734 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1735 if any error occurs.
1737 If an error occurs, no guarantee is made about the contents of the data at
1738 MYADDR. In particular, the caller should not depend upon partial reads
1739 filling the buffer with good data. There is no way for the caller to know
1740 how much good data might have been transfered anyway. Callers that can
1741 deal with partial reads should call target_read (which will retry until
1742 it makes no progress, and then return how much was transferred). */
1745 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, int len
)
1747 /* Dispatch to the topmost target, not the flattened current_target.
1748 Memory accesses check target->to_has_(all_)memory, and the
1749 flattened target doesn't inherit those. */
1750 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1751 myaddr
, memaddr
, len
) == len
)
1757 /* Like target_read_memory, but specify explicitly that this is a read from
1758 the target's stack. This may trigger different cache behavior. */
1761 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, int len
)
1763 /* Dispatch to the topmost target, not the flattened current_target.
1764 Memory accesses check target->to_has_(all_)memory, and the
1765 flattened target doesn't inherit those. */
1767 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1768 myaddr
, memaddr
, len
) == len
)
1774 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1775 Returns either 0 for success or an errno value if any error occurs.
1776 If an error occurs, no guarantee is made about how much data got written.
1777 Callers that can deal with partial writes should call target_write. */
1780 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, int len
)
1782 /* Dispatch to the topmost target, not the flattened current_target.
1783 Memory accesses check target->to_has_(all_)memory, and the
1784 flattened target doesn't inherit those. */
1785 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1786 myaddr
, memaddr
, len
) == len
)
1792 /* Write LEN bytes from MYADDR to target raw memory at address
1793 MEMADDR. Returns either 0 for success or an errno value if any
1794 error occurs. If an error occurs, no guarantee is made about how
1795 much data got written. Callers that can deal with partial writes
1796 should call target_write. */
1799 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, int len
)
1801 /* Dispatch to the topmost target, not the flattened current_target.
1802 Memory accesses check target->to_has_(all_)memory, and the
1803 flattened target doesn't inherit those. */
1804 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1805 myaddr
, memaddr
, len
) == len
)
1811 /* Fetch the target's memory map. */
1814 target_memory_map (void)
1816 VEC(mem_region_s
) *result
;
1817 struct mem_region
*last_one
, *this_one
;
1819 struct target_ops
*t
;
1822 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1824 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1825 if (t
->to_memory_map
!= NULL
)
1831 result
= t
->to_memory_map (t
);
1835 qsort (VEC_address (mem_region_s
, result
),
1836 VEC_length (mem_region_s
, result
),
1837 sizeof (struct mem_region
), mem_region_cmp
);
1839 /* Check that regions do not overlap. Simultaneously assign
1840 a numbering for the "mem" commands to use to refer to
1843 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1845 this_one
->number
= ix
;
1847 if (last_one
&& last_one
->hi
> this_one
->lo
)
1849 warning (_("Overlapping regions in memory map: ignoring"));
1850 VEC_free (mem_region_s
, result
);
1853 last_one
= this_one
;
1860 target_flash_erase (ULONGEST address
, LONGEST length
)
1862 struct target_ops
*t
;
1864 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1865 if (t
->to_flash_erase
!= NULL
)
1868 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1869 hex_string (address
), phex (length
, 0));
1870 t
->to_flash_erase (t
, address
, length
);
1878 target_flash_done (void)
1880 struct target_ops
*t
;
1882 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1883 if (t
->to_flash_done
!= NULL
)
1886 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1887 t
->to_flash_done (t
);
/* "show" hook for the trust-readonly-sections setting; prints the
   current VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1903 /* More generic transfers. */
1906 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1907 const char *annex
, gdb_byte
*readbuf
,
1908 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
1910 if (object
== TARGET_OBJECT_MEMORY
1911 && ops
->deprecated_xfer_memory
!= NULL
)
1912 /* If available, fall back to the target's
1913 "deprecated_xfer_memory" method. */
1918 if (writebuf
!= NULL
)
1920 void *buffer
= xmalloc (len
);
1921 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1923 memcpy (buffer
, writebuf
, len
);
1924 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1925 1/*write*/, NULL
, ops
);
1926 do_cleanups (cleanup
);
1928 if (readbuf
!= NULL
)
1929 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1930 0/*read*/, NULL
, ops
);
1933 else if (xfered
== 0 && errno
== 0)
1934 /* "deprecated_xfer_memory" uses 0, cross checked against
1935 ERRNO as one indication of an error. */
1940 else if (ops
->beneath
!= NULL
)
1941 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1942 readbuf
, writebuf
, offset
, len
);
1947 /* The xfer_partial handler for the topmost target. Unlike the default,
1948 it does not need to handle memory specially; it just passes all
1949 requests down the stack. */
1952 current_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1953 const char *annex
, gdb_byte
*readbuf
,
1954 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
1956 if (ops
->beneath
!= NULL
)
1957 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1958 readbuf
, writebuf
, offset
, len
);
1963 /* Target vector read/write partial wrapper functions. */
1966 target_read_partial (struct target_ops
*ops
,
1967 enum target_object object
,
1968 const char *annex
, gdb_byte
*buf
,
1969 ULONGEST offset
, LONGEST len
)
1971 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
);
1975 target_write_partial (struct target_ops
*ops
,
1976 enum target_object object
,
1977 const char *annex
, const gdb_byte
*buf
,
1978 ULONGEST offset
, LONGEST len
)
1980 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
);
1983 /* Wrappers to perform the full transfer. */
1985 /* For docs on target_read see target.h. */
1988 target_read (struct target_ops
*ops
,
1989 enum target_object object
,
1990 const char *annex
, gdb_byte
*buf
,
1991 ULONGEST offset
, LONGEST len
)
1995 while (xfered
< len
)
1997 LONGEST xfer
= target_read_partial (ops
, object
, annex
,
1998 (gdb_byte
*) buf
+ xfered
,
1999 offset
+ xfered
, len
- xfered
);
2001 /* Call an observer, notifying them of the xfer progress? */
2012 /* Assuming that the entire [begin, end) range of memory cannot be
2013 read, try to read whatever subrange is possible to read.
2015 The function returns, in RESULT, either zero or one memory block.
2016 If there's a readable subrange at the beginning, it is completely
2017 read and returned. Any further readable subrange will not be read.
2018 Otherwise, if there's a readable subrange at the end, it will be
2019 completely read and returned. Any readable subranges before it
2020 (obviously, not starting at the beginning), will be ignored. In
2021 other cases -- either no readable subrange, or readable subrange(s)
2022 that is neither at the beginning, or end, nothing is returned.
2024 The purpose of this function is to handle a read across a boundary
2025 of accessible memory in a case when memory map is not available.
2026 The above restrictions are fine for this case, but will give
2027 incorrect results if the memory is 'patchy'. However, supporting
2028 'patchy' memory would require trying to read every single byte,
2029 and it seems unacceptable solution. Explicit memory map is
2030 recommended for this case -- and target_read_memory_robust will
2031 take care of reading multiple ranges then. */
2034 read_whatever_is_readable (struct target_ops
*ops
,
2035 ULONGEST begin
, ULONGEST end
,
2036 VEC(memory_read_result_s
) **result
)
2038 gdb_byte
*buf
= xmalloc (end
- begin
);
2039 ULONGEST current_begin
= begin
;
2040 ULONGEST current_end
= end
;
2042 memory_read_result_s r
;
2044 /* If we previously failed to read 1 byte, nothing can be done here. */
2045 if (end
- begin
<= 1)
2051 /* Check that either first or the last byte is readable, and give up
2052 if not. This heuristic is meant to permit reading accessible memory
2053 at the boundary of accessible region. */
2054 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2055 buf
, begin
, 1) == 1)
2060 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2061 buf
+ (end
-begin
) - 1, end
- 1, 1) == 1)
2072 /* Loop invariant is that the [current_begin, current_end) was previously
2073 found to be not readable as a whole.
2075 Note loop condition -- if the range has 1 byte, we can't divide the range
2076 so there's no point trying further. */
2077 while (current_end
- current_begin
> 1)
2079 ULONGEST first_half_begin
, first_half_end
;
2080 ULONGEST second_half_begin
, second_half_end
;
2082 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2086 first_half_begin
= current_begin
;
2087 first_half_end
= middle
;
2088 second_half_begin
= middle
;
2089 second_half_end
= current_end
;
2093 first_half_begin
= middle
;
2094 first_half_end
= current_end
;
2095 second_half_begin
= current_begin
;
2096 second_half_end
= middle
;
2099 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2100 buf
+ (first_half_begin
- begin
),
2102 first_half_end
- first_half_begin
);
2104 if (xfer
== first_half_end
- first_half_begin
)
2106 /* This half reads up fine. So, the error must be in the
2108 current_begin
= second_half_begin
;
2109 current_end
= second_half_end
;
2113 /* This half is not readable. Because we've tried one byte, we
2114 know some part of this half if actually redable. Go to the next
2115 iteration to divide again and try to read.
2117 We don't handle the other half, because this function only tries
2118 to read a single readable subrange. */
2119 current_begin
= first_half_begin
;
2120 current_end
= first_half_end
;
2126 /* The [begin, current_begin) range has been read. */
2128 r
.end
= current_begin
;
2133 /* The [current_end, end) range has been read. */
2134 LONGEST rlen
= end
- current_end
;
2136 r
.data
= xmalloc (rlen
);
2137 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2138 r
.begin
= current_end
;
2142 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2146 free_memory_read_result_vector (void *x
)
2148 VEC(memory_read_result_s
) *v
= x
;
2149 memory_read_result_s
*current
;
2152 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2154 xfree (current
->data
);
2156 VEC_free (memory_read_result_s
, v
);
2159 VEC(memory_read_result_s
) *
2160 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2162 VEC(memory_read_result_s
) *result
= 0;
2165 while (xfered
< len
)
2167 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2170 /* If there is no explicit region, a fake one should be created. */
2171 gdb_assert (region
);
2173 if (region
->hi
== 0)
2174 rlen
= len
- xfered
;
2176 rlen
= region
->hi
- offset
;
2178 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2180 /* Cannot read this region. Note that we can end up here only
2181 if the region is explicitly marked inaccessible, or
2182 'inaccessible-by-default' is in effect. */
2187 LONGEST to_read
= min (len
- xfered
, rlen
);
2188 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2190 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2191 (gdb_byte
*) buffer
,
2192 offset
+ xfered
, to_read
);
2193 /* Call an observer, notifying them of the xfer progress? */
2196 /* Got an error reading full chunk. See if maybe we can read
2199 read_whatever_is_readable (ops
, offset
+ xfered
,
2200 offset
+ xfered
+ to_read
, &result
);
2205 struct memory_read_result r
;
2207 r
.begin
= offset
+ xfered
;
2208 r
.end
= r
.begin
+ xfer
;
2209 VEC_safe_push (memory_read_result_s
, result
, &r
);
2219 /* An alternative to target_write with progress callbacks. */
2222 target_write_with_progress (struct target_ops
*ops
,
2223 enum target_object object
,
2224 const char *annex
, const gdb_byte
*buf
,
2225 ULONGEST offset
, LONGEST len
,
2226 void (*progress
) (ULONGEST
, void *), void *baton
)
2230 /* Give the progress callback a chance to set up. */
2232 (*progress
) (0, baton
);
2234 while (xfered
< len
)
2236 LONGEST xfer
= target_write_partial (ops
, object
, annex
,
2237 (gdb_byte
*) buf
+ xfered
,
2238 offset
+ xfered
, len
- xfered
);
2246 (*progress
) (xfer
, baton
);
2254 /* For docs on target_write see target.h. */
2257 target_write (struct target_ops
*ops
,
2258 enum target_object object
,
2259 const char *annex
, const gdb_byte
*buf
,
2260 ULONGEST offset
, LONGEST len
)
2262 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2266 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2267 the size of the transferred data. PADDING additional bytes are
2268 available in *BUF_P. This is a helper function for
2269 target_read_alloc; see the declaration of that function for more
2273 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2274 const char *annex
, gdb_byte
**buf_p
, int padding
)
2276 size_t buf_alloc
, buf_pos
;
2280 /* This function does not have a length parameter; it reads the
2281 entire OBJECT). Also, it doesn't support objects fetched partly
2282 from one target and partly from another (in a different stratum,
2283 e.g. a core file and an executable). Both reasons make it
2284 unsuitable for reading memory. */
2285 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2287 /* Start by reading up to 4K at a time. The target will throttle
2288 this number down if necessary. */
2290 buf
= xmalloc (buf_alloc
);
2294 n
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2295 buf_pos
, buf_alloc
- buf_pos
- padding
);
2298 /* An error occurred. */
2304 /* Read all there was. */
2314 /* If the buffer is filling up, expand it. */
2315 if (buf_alloc
< buf_pos
* 2)
2318 buf
= xrealloc (buf
, buf_alloc
);
2325 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2326 the size of the transferred data. See the declaration in "target.h"
2327 function for more information about the return value. */
2330 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2331 const char *annex
, gdb_byte
**buf_p
)
2333 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2336 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2337 returned as a string, allocated using xmalloc. If an error occurs
2338 or the transfer is unsupported, NULL is returned. Empty objects
2339 are returned as allocated but empty strings. A warning is issued
2340 if the result contains any embedded NUL bytes. */
2343 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2347 LONGEST transferred
;
2349 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2351 if (transferred
< 0)
2354 if (transferred
== 0)
2355 return xstrdup ("");
2357 buffer
[transferred
] = 0;
2358 if (strlen (buffer
) < transferred
)
2359 warning (_("target object %d, annex %s, "
2360 "contained unexpected null characters"),
2361 (int) object
, annex
? annex
: "(none)");
2363 return (char *) buffer
;
2366 /* Memory transfer methods. */
2369 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2372 /* This method is used to read from an alternate, non-current
2373 target. This read must bypass the overlay support (as symbols
2374 don't match this target), and GDB's internal cache (wrong cache
2375 for this target). */
2376 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2378 memory_error (EIO
, addr
);
2382 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2383 int len
, enum bfd_endian byte_order
)
2385 gdb_byte buf
[sizeof (ULONGEST
)];
2387 gdb_assert (len
<= sizeof (buf
));
2388 get_target_memory (ops
, addr
, buf
, len
);
2389 return extract_unsigned_integer (buf
, len
, byte_order
);
2393 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2394 struct bp_target_info
*bp_tgt
)
2396 if (!may_insert_breakpoints
)
2398 warning (_("May not insert breakpoints"));
2402 return (*current_target
.to_insert_breakpoint
) (gdbarch
, bp_tgt
);
2406 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2407 struct bp_target_info
*bp_tgt
)
2409 /* This is kind of a weird case to handle, but the permission might
2410 have been changed after breakpoints were inserted - in which case
2411 we should just take the user literally and assume that any
2412 breakpoints should be left in place. */
2413 if (!may_insert_breakpoints
)
2415 warning (_("May not remove breakpoints"));
2419 return (*current_target
.to_remove_breakpoint
) (gdbarch
, bp_tgt
);
2423 target_info (char *args
, int from_tty
)
2425 struct target_ops
*t
;
2426 int has_all_mem
= 0;
2428 if (symfile_objfile
!= NULL
)
2429 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile
->name
);
2431 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2433 if (!(*t
->to_has_memory
) (t
))
2436 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2439 printf_unfiltered (_("\tWhile running this, "
2440 "GDB does not access memory from...\n"));
2441 printf_unfiltered ("%s:\n", t
->to_longname
);
2442 (t
->to_files_info
) (t
);
2443 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2447 /* This function is called before any new inferior is created, e.g.
2448 by running a program, attaching, or connecting to a target.
2449 It cleans up any state from previous invocations which might
2450 change between runs. This is a subset of what target_preopen
2451 resets (things which might change between targets). */
2454 target_pre_inferior (int from_tty
)
2456 /* Clear out solib state. Otherwise the solib state of the previous
2457 inferior might have survived and is entirely wrong for the new
2458 target. This has been observed on GNU/Linux using glibc 2.3. How
2470 Cannot access memory at address 0xdeadbeef
2473 /* In some OSs, the shared library list is the same/global/shared
2474 across inferiors. If code is shared between processes, so are
2475 memory regions and features. */
2476 if (!gdbarch_has_global_solist (target_gdbarch
))
2478 no_shared_libraries (NULL
, from_tty
);
2480 invalidate_target_mem_regions ();
2482 target_clear_description ();
2486 /* Callback for iterate_over_inferiors. Gets rid of the given
2490 dispose_inferior (struct inferior
*inf
, void *args
)
2492 struct thread_info
*thread
;
2494 thread
= any_thread_of_process (inf
->pid
);
2497 switch_to_thread (thread
->ptid
);
2499 /* Core inferiors actually should be detached, not killed. */
2500 if (target_has_execution
)
2503 target_detach (NULL
, 0);
2509 /* This is to be called by the open routine before it does
2513 target_preopen (int from_tty
)
2517 if (have_inferiors ())
2520 || !have_live_inferiors ()
2521 || query (_("A program is being debugged already. Kill it? ")))
2522 iterate_over_inferiors (dispose_inferior
, NULL
);
2524 error (_("Program not killed."));
2527 /* Calling target_kill may remove the target from the stack. But if
2528 it doesn't (which seems like a win for UDI), remove it now. */
2529 /* Leave the exec target, though. The user may be switching from a
2530 live process to a core of the same program. */
2531 pop_all_targets_above (file_stratum
, 0);
2533 target_pre_inferior (from_tty
);
2536 /* Detach a target after doing deferred register stores. */
2539 target_detach (char *args
, int from_tty
)
2541 struct target_ops
* t
;
2543 if (gdbarch_has_global_breakpoints (target_gdbarch
))
2544 /* Don't remove global breakpoints here. They're removed on
2545 disconnection from the target. */
2548 /* If we're in breakpoints-always-inserted mode, have to remove
2549 them before detaching. */
2550 remove_breakpoints_pid (PIDGET (inferior_ptid
));
2552 prepare_for_detach ();
2554 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2556 if (t
->to_detach
!= NULL
)
2558 t
->to_detach (t
, args
, from_tty
);
2560 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2566 internal_error (__FILE__
, __LINE__
, _("could not find a target to detach"));
2570 target_disconnect (char *args
, int from_tty
)
2572 struct target_ops
*t
;
2574 /* If we're in breakpoints-always-inserted mode or if breakpoints
2575 are global across processes, we have to remove them before
2577 remove_breakpoints ();
2579 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2580 if (t
->to_disconnect
!= NULL
)
2583 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2585 t
->to_disconnect (t
, args
, from_tty
);
2593 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2595 struct target_ops
*t
;
2597 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2599 if (t
->to_wait
!= NULL
)
2601 ptid_t retval
= (*t
->to_wait
) (t
, ptid
, status
, options
);
2605 char *status_string
;
2607 status_string
= target_waitstatus_to_string (status
);
2608 fprintf_unfiltered (gdb_stdlog
,
2609 "target_wait (%d, status) = %d, %s\n",
2610 PIDGET (ptid
), PIDGET (retval
),
2612 xfree (status_string
);
2623 target_pid_to_str (ptid_t ptid
)
2625 struct target_ops
*t
;
2627 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2629 if (t
->to_pid_to_str
!= NULL
)
2630 return (*t
->to_pid_to_str
) (t
, ptid
);
2633 return normal_pid_to_str (ptid
);
2637 target_thread_name (struct thread_info
*info
)
2639 struct target_ops
*t
;
2641 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2643 if (t
->to_thread_name
!= NULL
)
2644 return (*t
->to_thread_name
) (info
);
2651 target_resume (ptid_t ptid
, int step
, enum target_signal signal
)
2653 struct target_ops
*t
;
2655 target_dcache_invalidate ();
2657 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2659 if (t
->to_resume
!= NULL
)
2661 t
->to_resume (t
, ptid
, step
, signal
);
2663 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2665 step
? "step" : "continue",
2666 target_signal_to_name (signal
));
2668 registers_changed_ptid (ptid
);
2669 set_executing (ptid
, 1);
2670 set_running (ptid
, 1);
2671 clear_inline_frame_state (ptid
);
2680 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2682 struct target_ops
*t
;
2684 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2686 if (t
->to_pass_signals
!= NULL
)
2692 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2695 for (i
= 0; i
< numsigs
; i
++)
2696 if (pass_signals
[i
])
2697 fprintf_unfiltered (gdb_stdlog
, " %s",
2698 target_signal_to_name (i
));
2700 fprintf_unfiltered (gdb_stdlog
, " })\n");
2703 (*t
->to_pass_signals
) (numsigs
, pass_signals
);
2709 /* Look through the list of possible targets for a target that can
2713 target_follow_fork (int follow_child
)
2715 struct target_ops
*t
;
2717 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2719 if (t
->to_follow_fork
!= NULL
)
2721 int retval
= t
->to_follow_fork (t
, follow_child
);
2724 fprintf_unfiltered (gdb_stdlog
, "target_follow_fork (%d) = %d\n",
2725 follow_child
, retval
);
2730 /* Some target returned a fork event, but did not know how to follow it. */
2731 internal_error (__FILE__
, __LINE__
,
2732 _("could not find a target to follow fork"));
2736 target_mourn_inferior (void)
2738 struct target_ops
*t
;
2740 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2742 if (t
->to_mourn_inferior
!= NULL
)
2744 t
->to_mourn_inferior (t
);
2746 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2748 /* We no longer need to keep handles on any of the object files.
2749 Make sure to release them to avoid unnecessarily locking any
2750 of them while we're not actually debugging. */
2751 bfd_cache_close_all ();
2757 internal_error (__FILE__
, __LINE__
,
2758 _("could not find a target to follow mourn inferior"));
2761 /* Look for a target which can describe architectural features, starting
2762 from TARGET. If we find one, return its description. */
2764 const struct target_desc
*
2765 target_read_description (struct target_ops
*target
)
2767 struct target_ops
*t
;
2769 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2770 if (t
->to_read_description
!= NULL
)
2772 const struct target_desc
*tdesc
;
2774 tdesc
= t
->to_read_description (t
);
2782 /* The default implementation of to_search_memory.
2783 This implements a basic search of memory, reading target memory and
2784 performing the search here (as opposed to performing the search in on the
2785 target side with, for example, gdbserver). */
2788 simple_search_memory (struct target_ops
*ops
,
2789 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2790 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2791 CORE_ADDR
*found_addrp
)
2793 /* NOTE: also defined in find.c testcase. */
2794 #define SEARCH_CHUNK_SIZE 16000
2795 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2796 /* Buffer to hold memory contents for searching. */
2797 gdb_byte
*search_buf
;
2798 unsigned search_buf_size
;
2799 struct cleanup
*old_cleanups
;
2801 search_buf_size
= chunk_size
+ pattern_len
- 1;
2803 /* No point in trying to allocate a buffer larger than the search space. */
2804 if (search_space_len
< search_buf_size
)
2805 search_buf_size
= search_space_len
;
2807 search_buf
= malloc (search_buf_size
);
2808 if (search_buf
== NULL
)
2809 error (_("Unable to allocate memory to perform the search."));
2810 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2812 /* Prime the search buffer. */
2814 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2815 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2817 warning (_("Unable to access target memory at %s, halting search."),
2818 hex_string (start_addr
));
2819 do_cleanups (old_cleanups
);
2823 /* Perform the search.
2825 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2826 When we've scanned N bytes we copy the trailing bytes to the start and
2827 read in another N bytes. */
2829 while (search_space_len
>= pattern_len
)
2831 gdb_byte
*found_ptr
;
2832 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2834 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2835 pattern
, pattern_len
);
2837 if (found_ptr
!= NULL
)
2839 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2841 *found_addrp
= found_addr
;
2842 do_cleanups (old_cleanups
);
2846 /* Not found in this chunk, skip to next chunk. */
2848 /* Don't let search_space_len wrap here, it's unsigned. */
2849 if (search_space_len
>= chunk_size
)
2850 search_space_len
-= chunk_size
;
2852 search_space_len
= 0;
2854 if (search_space_len
>= pattern_len
)
2856 unsigned keep_len
= search_buf_size
- chunk_size
;
2857 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2860 /* Copy the trailing part of the previous iteration to the front
2861 of the buffer for the next iteration. */
2862 gdb_assert (keep_len
== pattern_len
- 1);
2863 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2865 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2867 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2868 search_buf
+ keep_len
, read_addr
,
2869 nr_to_read
) != nr_to_read
)
2871 warning (_("Unable to access target "
2872 "memory at %s, halting search."),
2873 hex_string (read_addr
));
2874 do_cleanups (old_cleanups
);
2878 start_addr
+= chunk_size
;
2884 do_cleanups (old_cleanups
);
2888 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2889 sequence of bytes in PATTERN with length PATTERN_LEN.
2891 The result is 1 if found, 0 if not found, and -1 if there was an error
2892 requiring halting of the search (e.g. memory read error).
2893 If the pattern is found the address is recorded in FOUND_ADDRP. */
2896 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2897 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2898 CORE_ADDR
*found_addrp
)
2900 struct target_ops
*t
;
2903 /* We don't use INHERIT to set current_target.to_search_memory,
2904 so we have to scan the target stack and handle targetdebug
2908 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2909 hex_string (start_addr
));
2911 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2912 if (t
->to_search_memory
!= NULL
)
2917 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2918 pattern
, pattern_len
, found_addrp
);
2922 /* If a special version of to_search_memory isn't available, use the
2924 found
= simple_search_memory (current_target
.beneath
,
2925 start_addr
, search_space_len
,
2926 pattern
, pattern_len
, found_addrp
);
2930 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2935 /* Look through the currently pushed targets. If none of them will
2936 be able to restart the currently running process, issue an error
2940 target_require_runnable (void)
2942 struct target_ops
*t
;
2944 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2946 /* If this target knows how to create a new program, then
2947 assume we will still be able to after killing the current
2948 one. Either killing and mourning will not pop T, or else
2949 find_default_run_target will find it again. */
2950 if (t
->to_create_inferior
!= NULL
)
2953 /* Do not worry about thread_stratum targets that can not
2954 create inferiors. Assume they will be pushed again if
2955 necessary, and continue to the process_stratum. */
2956 if (t
->to_stratum
== thread_stratum
2957 || t
->to_stratum
== arch_stratum
)
2960 error (_("The \"%s\" target does not support \"run\". "
2961 "Try \"help target\" or \"continue\"."),
2965 /* This function is only called if the target is running. In that
2966 case there should have been a process_stratum target and it
2967 should either know how to create inferiors, or not... */
2968 internal_error (__FILE__
, __LINE__
, _("No targets found"));
2971 /* Look through the list of possible targets for a target that can
2972 execute a run or attach command without any other data. This is
2973 used to locate the default process stratum.
2975 If DO_MESG is not NULL, the result is always valid (error() is
2976 called for errors); else, return NULL on error. */
2978 static struct target_ops
*
2979 find_default_run_target (char *do_mesg
)
2981 struct target_ops
**t
;
2982 struct target_ops
*runable
= NULL
;
2987 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
2990 if ((*t
)->to_can_run
&& target_can_run (*t
))
3000 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
3009 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
3011 struct target_ops
*t
;
3013 t
= find_default_run_target ("attach");
3014 (t
->to_attach
) (t
, args
, from_tty
);
3019 find_default_create_inferior (struct target_ops
*ops
,
3020 char *exec_file
, char *allargs
, char **env
,
3023 struct target_ops
*t
;
3025 t
= find_default_run_target ("run");
3026 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3031 find_default_can_async_p (void)
3033 struct target_ops
*t
;
3035 /* This may be called before the target is pushed on the stack;
3036 look for the default process stratum. If there's none, gdb isn't
3037 configured with a native debugger, and target remote isn't
3039 t
= find_default_run_target (NULL
);
3040 if (t
&& t
->to_can_async_p
)
3041 return (t
->to_can_async_p
) ();
3046 find_default_is_async_p (void)
3048 struct target_ops
*t
;
3050 /* This may be called before the target is pushed on the stack;
3051 look for the default process stratum. If there's none, gdb isn't
3052 configured with a native debugger, and target remote isn't
3054 t
= find_default_run_target (NULL
);
3055 if (t
&& t
->to_is_async_p
)
3056 return (t
->to_is_async_p
) ();
3061 find_default_supports_non_stop (void)
3063 struct target_ops
*t
;
3065 t
= find_default_run_target (NULL
);
3066 if (t
&& t
->to_supports_non_stop
)
3067 return (t
->to_supports_non_stop
) ();
3072 target_supports_non_stop (void)
3074 struct target_ops
*t
;
3076 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3077 if (t
->to_supports_non_stop
)
3078 return t
->to_supports_non_stop ();
3084 find_default_supports_disable_randomization (void)
3086 struct target_ops
*t
;
3088 t
= find_default_run_target (NULL
);
3089 if (t
&& t
->to_supports_disable_randomization
)
3090 return (t
->to_supports_disable_randomization
) ();
3095 target_supports_disable_randomization (void)
3097 struct target_ops
*t
;
3099 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3100 if (t
->to_supports_disable_randomization
)
3101 return t
->to_supports_disable_randomization ();
3107 target_get_osdata (const char *type
)
3109 struct target_ops
*t
;
3111 /* If we're already connected to something that can get us OS
3112 related data, use it. Otherwise, try using the native
3114 if (current_target
.to_stratum
>= process_stratum
)
3115 t
= current_target
.beneath
;
3117 t
= find_default_run_target ("get OS data");
3122 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3125 /* Determine the current address space of thread PTID. */
3127 struct address_space
*
3128 target_thread_address_space (ptid_t ptid
)
3130 struct address_space
*aspace
;
3131 struct inferior
*inf
;
3132 struct target_ops
*t
;
3134 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3136 if (t
->to_thread_address_space
!= NULL
)
3138 aspace
= t
->to_thread_address_space (t
, ptid
);
3139 gdb_assert (aspace
);
3142 fprintf_unfiltered (gdb_stdlog
,
3143 "target_thread_address_space (%s) = %d\n",
3144 target_pid_to_str (ptid
),
3145 address_space_num (aspace
));
3150 /* Fall-back to the "main" address space of the inferior. */
3151 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3153 if (inf
== NULL
|| inf
->aspace
== NULL
)
3154 internal_error (__FILE__
, __LINE__
,
3155 _("Can't determine the current "
3156 "address space of thread %s\n"),
3157 target_pid_to_str (ptid
));
3163 default_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
3165 return (len
<= gdbarch_ptr_bit (target_gdbarch
) / TARGET_CHAR_BIT
);
3169 default_watchpoint_addr_within_range (struct target_ops
*target
,
3171 CORE_ADDR start
, int length
)
3173 return addr
>= start
&& addr
< start
+ length
;
3176 static struct gdbarch
*
3177 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3179 return target_gdbarch
;
3195 return_minus_one (void)
3200 /* Find a single runnable target in the stack and return it. If for
3201 some reason there is more than one, return NULL. */
3204 find_run_target (void)
3206 struct target_ops
**t
;
3207 struct target_ops
*runable
= NULL
;
3212 for (t
= target_structs
; t
< target_structs
+ target_struct_size
; ++t
)
3214 if ((*t
)->to_can_run
&& target_can_run (*t
))
3221 return (count
== 1 ? runable
: NULL
);
3225 * Find the next target down the stack from the specified target.
3229 find_target_beneath (struct target_ops
*t
)
3235 /* The inferior process has died. Long live the inferior! */
3238 generic_mourn_inferior (void)
3242 ptid
= inferior_ptid
;
3243 inferior_ptid
= null_ptid
;
3245 if (!ptid_equal (ptid
, null_ptid
))
3247 int pid
= ptid_get_pid (ptid
);
3248 exit_inferior (pid
);
3251 breakpoint_init_inferior (inf_exited
);
3252 registers_changed ();
3254 reopen_exec_file ();
3255 reinit_frame_cache ();
3257 if (deprecated_detach_hook
)
3258 deprecated_detach_hook ();
3261 /* Helper function for child_wait and the derivatives of child_wait.
3262 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3263 translation of that in OURSTATUS. */
3265 store_waitstatus (struct target_waitstatus
*ourstatus
, int hoststatus
)
3267 if (WIFEXITED (hoststatus
))
3269 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
3270 ourstatus
->value
.integer
= WEXITSTATUS (hoststatus
);
3272 else if (!WIFSTOPPED (hoststatus
))
3274 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
3275 ourstatus
->value
.sig
= target_signal_from_host (WTERMSIG (hoststatus
));
3279 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3280 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (hoststatus
));
3284 /* Convert a normal process ID to a string. Returns the string in a
3288 normal_pid_to_str (ptid_t ptid
)
3290 static char buf
[32];
3292 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3297 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3299 return normal_pid_to_str (ptid
);
3302 /* Error-catcher for target_find_memory_regions. */
3304 dummy_find_memory_regions (find_memory_region_ftype ignore1
, void *ignore2
)
3306 error (_("Command not implemented for this target."));
3310 /* Error-catcher for target_make_corefile_notes. */
3312 dummy_make_corefile_notes (bfd
*ignore1
, int *ignore2
)
3314 error (_("Command not implemented for this target."));
3318 /* Error-catcher for target_get_bookmark. */
3320 dummy_get_bookmark (char *ignore1
, int ignore2
)
3326 /* Error-catcher for target_goto_bookmark. */
3328 dummy_goto_bookmark (gdb_byte
*ignore
, int from_tty
)
3333 /* Set up the handful of non-empty slots needed by the dummy target
3337 init_dummy_target (void)
3339 dummy_target
.to_shortname
= "None";
3340 dummy_target
.to_longname
= "None";
3341 dummy_target
.to_doc
= "";
3342 dummy_target
.to_attach
= find_default_attach
;
3343 dummy_target
.to_detach
=
3344 (void (*)(struct target_ops
*, char *, int))target_ignore
;
3345 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3346 dummy_target
.to_can_async_p
= find_default_can_async_p
;
3347 dummy_target
.to_is_async_p
= find_default_is_async_p
;
3348 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3349 dummy_target
.to_supports_disable_randomization
3350 = find_default_supports_disable_randomization
;
3351 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3352 dummy_target
.to_stratum
= dummy_stratum
;
3353 dummy_target
.to_find_memory_regions
= dummy_find_memory_regions
;
3354 dummy_target
.to_make_corefile_notes
= dummy_make_corefile_notes
;
3355 dummy_target
.to_get_bookmark
= dummy_get_bookmark
;
3356 dummy_target
.to_goto_bookmark
= dummy_goto_bookmark
;
3357 dummy_target
.to_xfer_partial
= default_xfer_partial
;
3358 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3359 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3360 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3361 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3362 dummy_target
.to_has_execution
3363 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3364 dummy_target
.to_stopped_by_watchpoint
= return_zero
;
3365 dummy_target
.to_stopped_data_address
=
3366 (int (*) (struct target_ops
*, CORE_ADDR
*)) return_zero
;
3367 dummy_target
.to_magic
= OPS_MAGIC
;
3371 debug_to_open (char *args
, int from_tty
)
3373 debug_target
.to_open (args
, from_tty
);
3375 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3379 target_close (struct target_ops
*targ
, int quitting
)
3381 if (targ
->to_xclose
!= NULL
)
3382 targ
->to_xclose (targ
, quitting
);
3383 else if (targ
->to_close
!= NULL
)
3384 targ
->to_close (quitting
);
3387 fprintf_unfiltered (gdb_stdlog
, "target_close (%d)\n", quitting
);
3391 target_attach (char *args
, int from_tty
)
3393 struct target_ops
*t
;
3395 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3397 if (t
->to_attach
!= NULL
)
3399 t
->to_attach (t
, args
, from_tty
);
3401 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3407 internal_error (__FILE__
, __LINE__
,
3408 _("could not find a target to attach"));
3412 target_thread_alive (ptid_t ptid
)
3414 struct target_ops
*t
;
3416 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3418 if (t
->to_thread_alive
!= NULL
)
3422 retval
= t
->to_thread_alive (t
, ptid
);
3424 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3425 PIDGET (ptid
), retval
);
3435 target_find_new_threads (void)
3437 struct target_ops
*t
;
3439 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3441 if (t
->to_find_new_threads
!= NULL
)
3443 t
->to_find_new_threads (t
);
3445 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3453 target_stop (ptid_t ptid
)
3457 warning (_("May not interrupt or stop the target, ignoring attempt"));
3461 (*current_target
.to_stop
) (ptid
);
3465 debug_to_post_attach (int pid
)
3467 debug_target
.to_post_attach (pid
);
3469 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3472 /* Return a pretty printed form of target_waitstatus.
3473 Space for the result is malloc'd, caller must free. */
3476 target_waitstatus_to_string (const struct target_waitstatus
*ws
)
3478 const char *kind_str
= "status->kind = ";
3482 case TARGET_WAITKIND_EXITED
:
3483 return xstrprintf ("%sexited, status = %d",
3484 kind_str
, ws
->value
.integer
);
3485 case TARGET_WAITKIND_STOPPED
:
3486 return xstrprintf ("%sstopped, signal = %s",
3487 kind_str
, target_signal_to_name (ws
->value
.sig
));
3488 case TARGET_WAITKIND_SIGNALLED
:
3489 return xstrprintf ("%ssignalled, signal = %s",
3490 kind_str
, target_signal_to_name (ws
->value
.sig
));
3491 case TARGET_WAITKIND_LOADED
:
3492 return xstrprintf ("%sloaded", kind_str
);
3493 case TARGET_WAITKIND_FORKED
:
3494 return xstrprintf ("%sforked", kind_str
);
3495 case TARGET_WAITKIND_VFORKED
:
3496 return xstrprintf ("%svforked", kind_str
);
3497 case TARGET_WAITKIND_EXECD
:
3498 return xstrprintf ("%sexecd", kind_str
);
3499 case TARGET_WAITKIND_SYSCALL_ENTRY
:
3500 return xstrprintf ("%sentered syscall", kind_str
);
3501 case TARGET_WAITKIND_SYSCALL_RETURN
:
3502 return xstrprintf ("%sexited syscall", kind_str
);
3503 case TARGET_WAITKIND_SPURIOUS
:
3504 return xstrprintf ("%sspurious", kind_str
);
3505 case TARGET_WAITKIND_IGNORE
:
3506 return xstrprintf ("%signore", kind_str
);
3507 case TARGET_WAITKIND_NO_HISTORY
:
3508 return xstrprintf ("%sno-history", kind_str
);
3509 case TARGET_WAITKIND_NO_RESUMED
:
3510 return xstrprintf ("%sno-resumed", kind_str
);
3512 return xstrprintf ("%sunknown???", kind_str
);
3517 debug_print_register (const char * func
,
3518 struct regcache
*regcache
, int regno
)
3520 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3522 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3523 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3524 && gdbarch_register_name (gdbarch
, regno
) != NULL
3525 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3526 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3527 gdbarch_register_name (gdbarch
, regno
));
3529 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3530 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3532 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3533 int i
, size
= register_size (gdbarch
, regno
);
3534 unsigned char buf
[MAX_REGISTER_SIZE
];
3536 regcache_raw_collect (regcache
, regno
, buf
);
3537 fprintf_unfiltered (gdb_stdlog
, " = ");
3538 for (i
= 0; i
< size
; i
++)
3540 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3542 if (size
<= sizeof (LONGEST
))
3544 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3546 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3547 core_addr_to_string_nz (val
), plongest (val
));
3550 fprintf_unfiltered (gdb_stdlog
, "\n");
3554 target_fetch_registers (struct regcache
*regcache
, int regno
)
3556 struct target_ops
*t
;
3558 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3560 if (t
->to_fetch_registers
!= NULL
)
3562 t
->to_fetch_registers (t
, regcache
, regno
);
3564 debug_print_register ("target_fetch_registers", regcache
, regno
);
3571 target_store_registers (struct regcache
*regcache
, int regno
)
3573 struct target_ops
*t
;
3575 if (!may_write_registers
)
3576 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3578 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3580 if (t
->to_store_registers
!= NULL
)
3582 t
->to_store_registers (t
, regcache
, regno
);
3585 debug_print_register ("target_store_registers", regcache
, regno
);
3595 target_core_of_thread (ptid_t ptid
)
3597 struct target_ops
*t
;
3599 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3601 if (t
->to_core_of_thread
!= NULL
)
3603 int retval
= t
->to_core_of_thread (t
, ptid
);
3606 fprintf_unfiltered (gdb_stdlog
,
3607 "target_core_of_thread (%d) = %d\n",
3608 PIDGET (ptid
), retval
);
3617 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3619 struct target_ops
*t
;
3621 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3623 if (t
->to_verify_memory
!= NULL
)
3625 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3628 fprintf_unfiltered (gdb_stdlog
,
3629 "target_verify_memory (%s, %s) = %d\n",
3630 paddress (target_gdbarch
, memaddr
),
3640 /* The documentation for this function is in its prototype declaration in
3644 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3646 struct target_ops
*t
;
3648 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3649 if (t
->to_insert_mask_watchpoint
!= NULL
)
3653 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
3656 fprintf_unfiltered (gdb_stdlog
, "\
3657 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3658 core_addr_to_string (addr
),
3659 core_addr_to_string (mask
), rw
, ret
);
3667 /* The documentation for this function is in its prototype declaration in
3671 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3673 struct target_ops
*t
;
3675 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3676 if (t
->to_remove_mask_watchpoint
!= NULL
)
3680 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
3683 fprintf_unfiltered (gdb_stdlog
, "\
3684 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3685 core_addr_to_string (addr
),
3686 core_addr_to_string (mask
), rw
, ret
);
3694 /* The documentation for this function is in its prototype declaration
3698 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3700 struct target_ops
*t
;
3702 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3703 if (t
->to_masked_watch_num_registers
!= NULL
)
3704 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
3709 /* The documentation for this function is in its prototype declaration
3713 target_ranged_break_num_registers (void)
3715 struct target_ops
*t
;
3717 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3718 if (t
->to_ranged_break_num_registers
!= NULL
)
3719 return t
->to_ranged_break_num_registers (t
);
3725 debug_to_prepare_to_store (struct regcache
*regcache
)
3727 debug_target
.to_prepare_to_store (regcache
);
3729 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
3733 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
3734 int write
, struct mem_attrib
*attrib
,
3735 struct target_ops
*target
)
3739 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
3742 fprintf_unfiltered (gdb_stdlog
,
3743 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
3744 paddress (target_gdbarch
, memaddr
), len
,
3745 write
? "write" : "read", retval
);
3751 fputs_unfiltered (", bytes =", gdb_stdlog
);
3752 for (i
= 0; i
< retval
; i
++)
3754 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
3756 if (targetdebug
< 2 && i
> 0)
3758 fprintf_unfiltered (gdb_stdlog
, " ...");
3761 fprintf_unfiltered (gdb_stdlog
, "\n");
3764 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
3768 fputc_unfiltered ('\n', gdb_stdlog
);
3774 debug_to_files_info (struct target_ops
*target
)
3776 debug_target
.to_files_info (target
);
3778 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
3782 debug_to_insert_breakpoint (struct gdbarch
*gdbarch
,
3783 struct bp_target_info
*bp_tgt
)
3787 retval
= debug_target
.to_insert_breakpoint (gdbarch
, bp_tgt
);
3789 fprintf_unfiltered (gdb_stdlog
,
3790 "target_insert_breakpoint (%s, xxx) = %ld\n",
3791 core_addr_to_string (bp_tgt
->placed_address
),
3792 (unsigned long) retval
);
3797 debug_to_remove_breakpoint (struct gdbarch
*gdbarch
,
3798 struct bp_target_info
*bp_tgt
)
3802 retval
= debug_target
.to_remove_breakpoint (gdbarch
, bp_tgt
);
3804 fprintf_unfiltered (gdb_stdlog
,
3805 "target_remove_breakpoint (%s, xxx) = %ld\n",
3806 core_addr_to_string (bp_tgt
->placed_address
),
3807 (unsigned long) retval
);
3812 debug_to_can_use_hw_breakpoint (int type
, int cnt
, int from_tty
)
3816 retval
= debug_target
.to_can_use_hw_breakpoint (type
, cnt
, from_tty
);
3818 fprintf_unfiltered (gdb_stdlog
,
3819 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3820 (unsigned long) type
,
3821 (unsigned long) cnt
,
3822 (unsigned long) from_tty
,
3823 (unsigned long) retval
);
3828 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr
, int len
)
3832 retval
= debug_target
.to_region_ok_for_hw_watchpoint (addr
, len
);
3834 fprintf_unfiltered (gdb_stdlog
,
3835 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3836 core_addr_to_string (addr
), (unsigned long) len
,
3837 core_addr_to_string (retval
));
3842 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr
, int len
, int rw
,
3843 struct expression
*cond
)
3847 retval
= debug_target
.to_can_accel_watchpoint_condition (addr
, len
,
3850 fprintf_unfiltered (gdb_stdlog
,
3851 "target_can_accel_watchpoint_condition "
3852 "(%s, %d, %d, %s) = %ld\n",
3853 core_addr_to_string (addr
), len
, rw
,
3854 host_address_to_string (cond
), (unsigned long) retval
);
3859 debug_to_stopped_by_watchpoint (void)
3863 retval
= debug_target
.to_stopped_by_watchpoint ();
3865 fprintf_unfiltered (gdb_stdlog
,
3866 "target_stopped_by_watchpoint () = %ld\n",
3867 (unsigned long) retval
);
3872 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
3876 retval
= debug_target
.to_stopped_data_address (target
, addr
);
3878 fprintf_unfiltered (gdb_stdlog
,
3879 "target_stopped_data_address ([%s]) = %ld\n",
3880 core_addr_to_string (*addr
),
3881 (unsigned long)retval
);
3886 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
3888 CORE_ADDR start
, int length
)
3892 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
3895 fprintf_filtered (gdb_stdlog
,
3896 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3897 core_addr_to_string (addr
), core_addr_to_string (start
),
3903 debug_to_insert_hw_breakpoint (struct gdbarch
*gdbarch
,
3904 struct bp_target_info
*bp_tgt
)
3908 retval
= debug_target
.to_insert_hw_breakpoint (gdbarch
, bp_tgt
);
3910 fprintf_unfiltered (gdb_stdlog
,
3911 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3912 core_addr_to_string (bp_tgt
->placed_address
),
3913 (unsigned long) retval
);
3918 debug_to_remove_hw_breakpoint (struct gdbarch
*gdbarch
,
3919 struct bp_target_info
*bp_tgt
)
3923 retval
= debug_target
.to_remove_hw_breakpoint (gdbarch
, bp_tgt
);
3925 fprintf_unfiltered (gdb_stdlog
,
3926 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3927 core_addr_to_string (bp_tgt
->placed_address
),
3928 (unsigned long) retval
);
3933 debug_to_insert_watchpoint (CORE_ADDR addr
, int len
, int type
,
3934 struct expression
*cond
)
3938 retval
= debug_target
.to_insert_watchpoint (addr
, len
, type
, cond
);
3940 fprintf_unfiltered (gdb_stdlog
,
3941 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3942 core_addr_to_string (addr
), len
, type
,
3943 host_address_to_string (cond
), (unsigned long) retval
);
3948 debug_to_remove_watchpoint (CORE_ADDR addr
, int len
, int type
,
3949 struct expression
*cond
)
3953 retval
= debug_target
.to_remove_watchpoint (addr
, len
, type
, cond
);
3955 fprintf_unfiltered (gdb_stdlog
,
3956 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3957 core_addr_to_string (addr
), len
, type
,
3958 host_address_to_string (cond
), (unsigned long) retval
);
3963 debug_to_terminal_init (void)
3965 debug_target
.to_terminal_init ();
3967 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
3971 debug_to_terminal_inferior (void)
3973 debug_target
.to_terminal_inferior ();
3975 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
3979 debug_to_terminal_ours_for_output (void)
3981 debug_target
.to_terminal_ours_for_output ();
3983 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
3987 debug_to_terminal_ours (void)
3989 debug_target
.to_terminal_ours ();
3991 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
3995 debug_to_terminal_save_ours (void)
3997 debug_target
.to_terminal_save_ours ();
3999 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4003 debug_to_terminal_info (char *arg
, int from_tty
)
4005 debug_target
.to_terminal_info (arg
, from_tty
);
4007 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4012 debug_to_load (char *args
, int from_tty
)
4014 debug_target
.to_load (args
, from_tty
);
4016 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4020 debug_to_post_startup_inferior (ptid_t ptid
)
4022 debug_target
.to_post_startup_inferior (ptid
);
4024 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4029 debug_to_insert_fork_catchpoint (int pid
)
4033 retval
= debug_target
.to_insert_fork_catchpoint (pid
);
4035 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4042 debug_to_remove_fork_catchpoint (int pid
)
4046 retval
= debug_target
.to_remove_fork_catchpoint (pid
);
4048 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4055 debug_to_insert_vfork_catchpoint (int pid
)
4059 retval
= debug_target
.to_insert_vfork_catchpoint (pid
);
4061 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4068 debug_to_remove_vfork_catchpoint (int pid
)
4072 retval
= debug_target
.to_remove_vfork_catchpoint (pid
);
4074 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4081 debug_to_insert_exec_catchpoint (int pid
)
4085 retval
= debug_target
.to_insert_exec_catchpoint (pid
);
4087 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4094 debug_to_remove_exec_catchpoint (int pid
)
4098 retval
= debug_target
.to_remove_exec_catchpoint (pid
);
4100 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4107 debug_to_has_exited (int pid
, int wait_status
, int *exit_status
)
4111 has_exited
= debug_target
.to_has_exited (pid
, wait_status
, exit_status
);
4113 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4114 pid
, wait_status
, *exit_status
, has_exited
);
4120 debug_to_can_run (void)
4124 retval
= debug_target
.to_can_run ();
4126 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4131 static struct gdbarch
*
4132 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4134 struct gdbarch
*retval
;
4136 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4138 fprintf_unfiltered (gdb_stdlog
,
4139 "target_thread_architecture (%s) = %s [%s]\n",
4140 target_pid_to_str (ptid
),
4141 host_address_to_string (retval
),
4142 gdbarch_bfd_arch_info (retval
)->printable_name
);
4147 debug_to_stop (ptid_t ptid
)
4149 debug_target
.to_stop (ptid
);
4151 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4152 target_pid_to_str (ptid
));
4156 debug_to_rcmd (char *command
,
4157 struct ui_file
*outbuf
)
4159 debug_target
.to_rcmd (command
, outbuf
);
4160 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4164 debug_to_pid_to_exec_file (int pid
)
4168 exec_file
= debug_target
.to_pid_to_exec_file (pid
);
4170 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4177 setup_target_debug (void)
4179 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4181 current_target
.to_open
= debug_to_open
;
4182 current_target
.to_post_attach
= debug_to_post_attach
;
4183 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4184 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4185 current_target
.to_files_info
= debug_to_files_info
;
4186 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4187 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4188 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4189 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4190 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4191 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4192 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4193 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4194 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4195 current_target
.to_watchpoint_addr_within_range
4196 = debug_to_watchpoint_addr_within_range
;
4197 current_target
.to_region_ok_for_hw_watchpoint
4198 = debug_to_region_ok_for_hw_watchpoint
;
4199 current_target
.to_can_accel_watchpoint_condition
4200 = debug_to_can_accel_watchpoint_condition
;
4201 current_target
.to_terminal_init
= debug_to_terminal_init
;
4202 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4203 current_target
.to_terminal_ours_for_output
4204 = debug_to_terminal_ours_for_output
;
4205 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4206 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4207 current_target
.to_terminal_info
= debug_to_terminal_info
;
4208 current_target
.to_load
= debug_to_load
;
4209 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4210 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4211 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4212 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4213 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4214 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4215 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4216 current_target
.to_has_exited
= debug_to_has_exited
;
4217 current_target
.to_can_run
= debug_to_can_run
;
4218 current_target
.to_stop
= debug_to_stop
;
4219 current_target
.to_rcmd
= debug_to_rcmd
;
4220 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4221 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4231 do_monitor_command (char *cmd
,
4234 if ((current_target
.to_rcmd
4235 == (void (*) (char *, struct ui_file
*)) tcomplain
)
4236 || (current_target
.to_rcmd
== debug_to_rcmd
4237 && (debug_target
.to_rcmd
4238 == (void (*) (char *, struct ui_file
*)) tcomplain
)))
4239 error (_("\"monitor\" command not supported by this target."));
4240 target_rcmd (cmd
, gdb_stdtarg
);
4243 /* Print the name of each layers of our target stack. */
4246 maintenance_print_target_stack (char *cmd
, int from_tty
)
4248 struct target_ops
*t
;
4250 printf_filtered (_("The current target stack is:\n"));
4252 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4254 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, linux_nat_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
4266 set_maintenance_target_async_permitted (char *args
, int from_tty
,
4267 struct cmd_list_element
*c
)
4269 if (have_live_inferiors ())
4271 target_async_permitted_1
= target_async_permitted
;
4272 error (_("Cannot change this setting while the inferior is running."));
4275 target_async_permitted
= target_async_permitted_1
;
/* The "show target-async" callback.  NOTE(review): the VALUE parameter
   line was dropped by extraction; reconstructed as "const char *value"
   to match the standard show-command callback signature — confirm.  */

static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
/* Temporary copies of permission settings.  These hold the values the
   "set may-*" commands stage before they are committed to the real
   may_* flags by the set callbacks below.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4297 /* Make the user-set values match the real values again. */
4300 update_target_permissions (void)
4302 may_write_registers_1
= may_write_registers
;
4303 may_write_memory_1
= may_write_memory
;
4304 may_insert_breakpoints_1
= may_insert_breakpoints
;
4305 may_insert_tracepoints_1
= may_insert_tracepoints
;
4306 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4307 may_stop_1
= may_stop
;
4310 /* The one function handles (most of) the permission flags in the same
4314 set_target_permissions (char *args
, int from_tty
,
4315 struct cmd_list_element
*c
)
4317 if (target_has_execution
)
4319 update_target_permissions ();
4320 error (_("Cannot change this setting while the inferior is running."));
4323 /* Make the real values match the user-changed values. */
4324 may_write_registers
= may_write_registers_1
;
4325 may_insert_breakpoints
= may_insert_breakpoints_1
;
4326 may_insert_tracepoints
= may_insert_tracepoints_1
;
4327 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4328 may_stop
= may_stop_1
;
4329 update_observer_mode ();
4332 /* Set memory write permission independently of observer mode. */
4335 set_write_memory_permission (char *args
, int from_tty
,
4336 struct cmd_list_element
*c
)
4338 /* Make the real values match the user-changed values. */
4339 may_write_memory
= may_write_memory_1
;
4340 update_observer_mode ();
4345 initialize_targets (void)
4347 init_dummy_target ();
4348 push_target (&dummy_target
);
4350 add_info ("target", target_info
, targ_desc
);
4351 add_info ("files", target_info
, targ_desc
);
4353 add_setshow_zinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4354 Set target debugging."), _("\
4355 Show target debugging."), _("\
4356 When non-zero, target debugging is enabled. Higher numbers are more\n\
4357 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4361 &setdebuglist
, &showdebuglist
);
4363 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4364 &trust_readonly
, _("\
4365 Set mode for reading from readonly sections."), _("\
4366 Show mode for reading from readonly sections."), _("\
4367 When this mode is on, memory reads from readonly sections (such as .text)\n\
4368 will be read from the object file instead of from the target. This will\n\
4369 result in significant performance improvement for remote targets."),
4371 show_trust_readonly
,
4372 &setlist
, &showlist
);
4374 add_com ("monitor", class_obscure
, do_monitor_command
,
4375 _("Send a command to the remote monitor (remote targets only)."));
4377 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
4378 _("Print the name of each layer of the internal target stack."),
4379 &maintenanceprintlist
);
4381 add_setshow_boolean_cmd ("target-async", no_class
,
4382 &target_async_permitted_1
, _("\
4383 Set whether gdb controls the inferior in asynchronous mode."), _("\
4384 Show whether gdb controls the inferior in asynchronous mode."), _("\
4385 Tells gdb whether to control the inferior in asynchronous mode."),
4386 set_maintenance_target_async_permitted
,
4387 show_maintenance_target_async_permitted
,
4391 add_setshow_boolean_cmd ("stack-cache", class_support
,
4392 &stack_cache_enabled_p_1
, _("\
4393 Set cache use for stack access."), _("\
4394 Show cache use for stack access."), _("\
4395 When on, use the data cache for all stack access, regardless of any\n\
4396 configured memory regions. This improves remote performance significantly.\n\
4397 By default, caching for stack access is on."),
4398 set_stack_cache_enabled_p
,
4399 show_stack_cache_enabled_p
,
4400 &setlist
, &showlist
);
4402 add_setshow_boolean_cmd ("may-write-registers", class_support
,
4403 &may_write_registers_1
, _("\
4404 Set permission to write into registers."), _("\
4405 Show permission to write into registers."), _("\
4406 When this permission is on, GDB may write into the target's registers.\n\
4407 Otherwise, any sort of write attempt will result in an error."),
4408 set_target_permissions
, NULL
,
4409 &setlist
, &showlist
);
4411 add_setshow_boolean_cmd ("may-write-memory", class_support
,
4412 &may_write_memory_1
, _("\
4413 Set permission to write into target memory."), _("\
4414 Show permission to write into target memory."), _("\
4415 When this permission is on, GDB may write into the target's memory.\n\
4416 Otherwise, any sort of write attempt will result in an error."),
4417 set_write_memory_permission
, NULL
,
4418 &setlist
, &showlist
);
4420 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
4421 &may_insert_breakpoints_1
, _("\
4422 Set permission to insert breakpoints in the target."), _("\
4423 Show permission to insert breakpoints in the target."), _("\
4424 When this permission is on, GDB may insert breakpoints in the program.\n\
4425 Otherwise, any sort of insertion attempt will result in an error."),
4426 set_target_permissions
, NULL
,
4427 &setlist
, &showlist
);
4429 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
4430 &may_insert_tracepoints_1
, _("\
4431 Set permission to insert tracepoints in the target."), _("\
4432 Show permission to insert tracepoints in the target."), _("\
4433 When this permission is on, GDB may insert tracepoints in the program.\n\
4434 Otherwise, any sort of insertion attempt will result in an error."),
4435 set_target_permissions
, NULL
,
4436 &setlist
, &showlist
);
4438 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
4439 &may_insert_fast_tracepoints_1
, _("\
4440 Set permission to insert fast tracepoints in the target."), _("\
4441 Show permission to insert fast tracepoints in the target."), _("\
4442 When this permission is on, GDB may insert fast tracepoints.\n\
4443 Otherwise, any sort of insertion attempt will result in an error."),
4444 set_target_permissions
, NULL
,
4445 &setlist
, &showlist
);
4447 add_setshow_boolean_cmd ("may-interrupt", class_support
,
4449 Set permission to interrupt or signal the target."), _("\
4450 Show permission to interrupt or signal the target."), _("\
4451 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4452 Otherwise, any attempt to interrupt or stop will be ignored."),
4453 set_target_permissions
, NULL
,
4454 &setlist
, &showlist
);
4457 target_dcache
= dcache_init ();