1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 static void target_info (char *, int);
50 static void default_terminal_info (struct target_ops
*, const char *, int);
52 static int default_watchpoint_addr_within_range (struct target_ops
*,
53 CORE_ADDR
, CORE_ADDR
, int);
55 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
58 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
60 static ptid_t
default_get_ada_task_ptid (struct target_ops
*self
,
63 static void tcomplain (void) ATTRIBUTE_NORETURN
;
65 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
67 static int return_zero (void);
69 static void *return_null (void);
71 void target_ignore (void);
73 static void target_command (char *, int);
75 static struct target_ops
*find_default_run_target (char *);
77 static target_xfer_partial_ftype default_xfer_partial
;
79 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
82 static int dummy_find_memory_regions (struct target_ops
*self
,
83 find_memory_region_ftype ignore1
,
86 static char *dummy_make_corefile_notes (struct target_ops
*self
,
87 bfd
*ignore1
, int *ignore2
);
89 static int find_default_can_async_p (struct target_ops
*ignore
);
91 static int find_default_is_async_p (struct target_ops
*ignore
);
93 static enum exec_direction_kind default_execution_direction
94 (struct target_ops
*self
);
96 #include "target-delegates.c"
98 static void init_dummy_target (void);
100 static struct target_ops debug_target
;
102 static void debug_to_open (char *, int);
104 static void debug_to_prepare_to_store (struct target_ops
*self
,
107 static void debug_to_files_info (struct target_ops
*);
109 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
110 struct bp_target_info
*);
112 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
113 struct bp_target_info
*);
115 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
118 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
120 struct bp_target_info
*);
122 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
124 struct bp_target_info
*);
126 static int debug_to_insert_watchpoint (struct target_ops
*self
,
128 struct expression
*);
130 static int debug_to_remove_watchpoint (struct target_ops
*self
,
132 struct expression
*);
134 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
136 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
137 CORE_ADDR
, CORE_ADDR
, int);
139 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
142 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
144 struct expression
*);
146 static void debug_to_terminal_init (struct target_ops
*self
);
148 static void debug_to_terminal_inferior (struct target_ops
*self
);
150 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
152 static void debug_to_terminal_save_ours (struct target_ops
*self
);
154 static void debug_to_terminal_ours (struct target_ops
*self
);
156 static void debug_to_load (struct target_ops
*self
, char *, int);
158 static int debug_to_can_run (struct target_ops
*self
);
160 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
162 /* Pointer to array of target architecture structures; the size of the
163 array; the current index into the array; the allocated size of the
165 struct target_ops
**target_structs
;
166 unsigned target_struct_size
;
167 unsigned target_struct_allocsize
;
168 #define DEFAULT_ALLOCSIZE 10
170 /* The initial current target, so that there is always a semi-valid
173 static struct target_ops dummy_target
;
175 /* Top of target stack. */
177 static struct target_ops
*target_stack
;
179 /* The target structure we are currently using to talk to a process
180 or file or whatever "inferior" we have. */
182 struct target_ops current_target
;
184 /* Command list for target. */
186 static struct cmd_list_element
*targetlist
= NULL
;
188 /* Nonzero if we should trust readonly sections from the
189 executable when reading memory. */
191 static int trust_readonly
= 0;
193 /* Nonzero if we should show true memory content including
194 memory breakpoint inserted by gdb. */
196 static int show_memory_breakpoints
= 0;
198 /* These globals control whether GDB attempts to perform these
199 operations; they are useful for targets that need to prevent
200 inadvertant disruption, such as in non-stop mode. */
202 int may_write_registers
= 1;
204 int may_write_memory
= 1;
206 int may_insert_breakpoints
= 1;
208 int may_insert_tracepoints
= 1;
210 int may_insert_fast_tracepoints
= 1;
214 /* Non-zero if we want to see trace of target level stuff. */
216 static unsigned int targetdebug
= 0;
/* Callback for "show debug target": report the current value of the
   targetdebug setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
224 static void setup_target_debug (void);
226 /* The user just typed 'target' without the name of a target. */
229 target_command (char *arg
, int from_tty
)
231 fputs_filtered ("Argument required (target name). Try `help target'\n",
235 /* Default target_has_* methods for process_stratum targets. */
238 default_child_has_all_memory (struct target_ops
*ops
)
240 /* If no inferior selected, then we can't read memory here. */
241 if (ptid_equal (inferior_ptid
, null_ptid
))
248 default_child_has_memory (struct target_ops
*ops
)
250 /* If no inferior selected, then we can't read memory here. */
251 if (ptid_equal (inferior_ptid
, null_ptid
))
258 default_child_has_stack (struct target_ops
*ops
)
260 /* If no inferior selected, there's no stack. */
261 if (ptid_equal (inferior_ptid
, null_ptid
))
268 default_child_has_registers (struct target_ops
*ops
)
270 /* Can't read registers from no inferior. */
271 if (ptid_equal (inferior_ptid
, null_ptid
))
278 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
280 /* If there's no thread selected, then we can't make it run through
282 if (ptid_equal (the_ptid
, null_ptid
))
290 target_has_all_memory_1 (void)
292 struct target_ops
*t
;
294 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
295 if (t
->to_has_all_memory (t
))
302 target_has_memory_1 (void)
304 struct target_ops
*t
;
306 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
307 if (t
->to_has_memory (t
))
314 target_has_stack_1 (void)
316 struct target_ops
*t
;
318 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
319 if (t
->to_has_stack (t
))
326 target_has_registers_1 (void)
328 struct target_ops
*t
;
330 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
331 if (t
->to_has_registers (t
))
338 target_has_execution_1 (ptid_t the_ptid
)
340 struct target_ops
*t
;
342 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
343 if (t
->to_has_execution (t
, the_ptid
))
350 target_has_execution_current (void)
352 return target_has_execution_1 (inferior_ptid
);
355 /* Complete initialization of T. This ensures that various fields in
356 T are set, if needed by the target implementation. */
359 complete_target_initialization (struct target_ops
*t
)
361 /* Provide default values for all "must have" methods. */
362 if (t
->to_xfer_partial
== NULL
)
363 t
->to_xfer_partial
= default_xfer_partial
;
365 if (t
->to_has_all_memory
== NULL
)
366 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
368 if (t
->to_has_memory
== NULL
)
369 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
371 if (t
->to_has_stack
== NULL
)
372 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
374 if (t
->to_has_registers
== NULL
)
375 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
377 if (t
->to_has_execution
== NULL
)
378 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
380 install_delegators (t
);
383 /* Add possible target architecture T to the list and add a new
384 command 'target T->to_shortname'. Set COMPLETER as the command's
385 completer if not NULL. */
388 add_target_with_completer (struct target_ops
*t
,
389 completer_ftype
*completer
)
391 struct cmd_list_element
*c
;
393 complete_target_initialization (t
);
397 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
398 target_structs
= (struct target_ops
**) xmalloc
399 (target_struct_allocsize
* sizeof (*target_structs
));
401 if (target_struct_size
>= target_struct_allocsize
)
403 target_struct_allocsize
*= 2;
404 target_structs
= (struct target_ops
**)
405 xrealloc ((char *) target_structs
,
406 target_struct_allocsize
* sizeof (*target_structs
));
408 target_structs
[target_struct_size
++] = t
;
410 if (targetlist
== NULL
)
411 add_prefix_cmd ("target", class_run
, target_command
, _("\
412 Connect to a target machine or process.\n\
413 The first argument is the type or protocol of the target machine.\n\
414 Remaining arguments are interpreted by the target protocol. For more\n\
415 information on the arguments for a particular protocol, type\n\
416 `help target ' followed by the protocol name."),
417 &targetlist
, "target ", 0, &cmdlist
);
418 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
420 if (completer
!= NULL
)
421 set_cmd_completer (c
, completer
);
424 /* Add a possible target architecture to the list. */
427 add_target (struct target_ops
*t
)
429 add_target_with_completer (t
, NULL
);
435 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
437 struct cmd_list_element
*c
;
440 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
442 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
443 alt
= xstrprintf ("target %s", t
->to_shortname
);
444 deprecate_cmd (c
, alt
);
457 struct target_ops
*t
;
459 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
460 if (t
->to_kill
!= NULL
)
463 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
473 target_load (char *arg
, int from_tty
)
475 target_dcache_invalidate ();
476 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
480 target_create_inferior (char *exec_file
, char *args
,
481 char **env
, int from_tty
)
483 struct target_ops
*t
;
485 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
487 if (t
->to_create_inferior
!= NULL
)
489 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
491 fprintf_unfiltered (gdb_stdlog
,
492 "target_create_inferior (%s, %s, xxx, %d)\n",
493 exec_file
, args
, from_tty
);
498 internal_error (__FILE__
, __LINE__
,
499 _("could not find a target to create inferior"));
503 target_terminal_inferior (void)
505 /* A background resume (``run&'') should leave GDB in control of the
506 terminal. Use target_can_async_p, not target_is_async_p, since at
507 this point the target is not async yet. However, if sync_execution
508 is not set, we know it will become async prior to resume. */
509 if (target_can_async_p () && !sync_execution
)
512 /* If GDB is resuming the inferior in the foreground, install
513 inferior's terminal modes. */
514 (*current_target
.to_terminal_inferior
) (¤t_target
);
518 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
519 struct target_ops
*t
)
521 errno
= EIO
; /* Can't read/write this location. */
522 return 0; /* No bytes handled. */
528 error (_("You can't do that when your target is `%s'"),
529 current_target
.to_shortname
);
535 error (_("You can't do that without a process to debug."));
/* Default to_terminal_info method: there is nothing recorded about
   the inferior's terminal, so just say so.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
544 /* A default implementation for the to_get_ada_task_ptid target method.
546 This function builds the PTID by using both LWP and TID as part of
547 the PTID lwp and tid elements. The pid used is the pid of the
551 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
553 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
556 static enum exec_direction_kind
557 default_execution_direction (struct target_ops
*self
)
559 if (!target_can_execute_reverse
)
561 else if (!target_can_async_p ())
564 gdb_assert_not_reached ("\
565 to_execution_direction must be implemented for reverse async");
568 /* Go through the target stack from top to bottom, copying over zero
569 entries in current_target, then filling in still empty entries. In
570 effect, we are doing class inheritance through the pushed target
573 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
574 is currently implemented, is that it discards any knowledge of
575 which target an inherited method originally belonged to.
576 Consequently, new new target methods should instead explicitly and
577 locally search the target stack for the target that can handle the
581 update_current_target (void)
583 struct target_ops
*t
;
585 /* First, reset current's contents. */
586 memset (¤t_target
, 0, sizeof (current_target
));
588 /* Install the delegators. */
589 install_delegators (¤t_target
);
591 #define INHERIT(FIELD, TARGET) \
592 if (!current_target.FIELD) \
593 current_target.FIELD = (TARGET)->FIELD
595 for (t
= target_stack
; t
; t
= t
->beneath
)
597 INHERIT (to_shortname
, t
);
598 INHERIT (to_longname
, t
);
600 /* Do not inherit to_open. */
601 /* Do not inherit to_close. */
602 /* Do not inherit to_attach. */
603 /* Do not inherit to_post_attach. */
604 INHERIT (to_attach_no_wait
, t
);
605 /* Do not inherit to_detach. */
606 /* Do not inherit to_disconnect. */
607 /* Do not inherit to_resume. */
608 /* Do not inherit to_wait. */
609 /* Do not inherit to_fetch_registers. */
610 /* Do not inherit to_store_registers. */
611 /* Do not inherit to_prepare_to_store. */
612 INHERIT (deprecated_xfer_memory
, t
);
613 /* Do not inherit to_files_info. */
614 /* Do not inherit to_insert_breakpoint. */
615 /* Do not inherit to_remove_breakpoint. */
616 /* Do not inherit to_can_use_hw_breakpoint. */
617 /* Do not inherit to_insert_hw_breakpoint. */
618 /* Do not inherit to_remove_hw_breakpoint. */
619 /* Do not inherit to_ranged_break_num_registers. */
620 /* Do not inherit to_insert_watchpoint. */
621 /* Do not inherit to_remove_watchpoint. */
622 /* Do not inherit to_insert_mask_watchpoint. */
623 /* Do not inherit to_remove_mask_watchpoint. */
624 /* Do not inherit to_stopped_data_address. */
625 INHERIT (to_have_steppable_watchpoint
, t
);
626 INHERIT (to_have_continuable_watchpoint
, t
);
627 /* Do not inherit to_stopped_by_watchpoint. */
628 /* Do not inherit to_watchpoint_addr_within_range. */
629 /* Do not inherit to_region_ok_for_hw_watchpoint. */
630 /* Do not inherit to_can_accel_watchpoint_condition. */
631 /* Do not inherit to_masked_watch_num_registers. */
632 /* Do not inherit to_terminal_init. */
633 /* Do not inherit to_terminal_inferior. */
634 /* Do not inherit to_terminal_ours_for_output. */
635 /* Do not inherit to_terminal_ours. */
636 /* Do not inherit to_terminal_save_ours. */
637 /* Do not inherit to_terminal_info. */
638 /* Do not inherit to_kill. */
639 /* Do not inherit to_load. */
640 /* Do no inherit to_create_inferior. */
641 /* Do not inherit to_post_startup_inferior. */
642 /* Do not inherit to_insert_fork_catchpoint. */
643 /* Do not inherit to_remove_fork_catchpoint. */
644 /* Do not inherit to_insert_vfork_catchpoint. */
645 /* Do not inherit to_remove_vfork_catchpoint. */
646 /* Do not inherit to_follow_fork. */
647 /* Do not inherit to_insert_exec_catchpoint. */
648 /* Do not inherit to_remove_exec_catchpoint. */
649 /* Do not inherit to_set_syscall_catchpoint. */
650 /* Do not inherit to_has_exited. */
651 /* Do not inherit to_mourn_inferior. */
652 INHERIT (to_can_run
, t
);
653 /* Do not inherit to_pass_signals. */
654 /* Do not inherit to_program_signals. */
655 /* Do not inherit to_thread_alive. */
656 /* Do not inherit to_find_new_threads. */
657 /* Do not inherit to_pid_to_str. */
658 /* Do not inherit to_extra_thread_info. */
659 /* Do not inherit to_thread_name. */
660 INHERIT (to_stop
, t
);
661 /* Do not inherit to_xfer_partial. */
662 /* Do not inherit to_rcmd. */
663 /* Do not inherit to_pid_to_exec_file. */
664 /* Do not inherit to_log_command. */
665 INHERIT (to_stratum
, t
);
666 /* Do not inherit to_has_all_memory. */
667 /* Do not inherit to_has_memory. */
668 /* Do not inherit to_has_stack. */
669 /* Do not inherit to_has_registers. */
670 /* Do not inherit to_has_execution. */
671 INHERIT (to_has_thread_control
, t
);
672 /* Do not inherit to_can_async_p. */
673 /* Do not inherit to_is_async_p. */
674 /* Do not inherit to_async. */
675 /* Do not inherit to_find_memory_regions. */
676 /* Do not inherit to_make_corefile_notes. */
677 /* Do not inherit to_get_bookmark. */
678 /* Do not inherit to_goto_bookmark. */
679 /* Do not inherit to_get_thread_local_address. */
680 /* Do not inherit to_can_execute_reverse. */
681 /* Do not inherit to_execution_direction. */
682 /* Do not inherit to_thread_architecture. */
683 /* Do not inherit to_read_description. */
684 /* Do not inherit to_get_ada_task_ptid. */
685 /* Do not inherit to_search_memory. */
686 /* Do not inherit to_supports_multi_process. */
687 /* Do not inherit to_supports_enable_disable_tracepoint. */
688 /* Do not inherit to_supports_string_tracing. */
689 /* Do not inherit to_trace_init. */
690 /* Do not inherit to_download_tracepoint. */
691 /* Do not inherit to_can_download_tracepoint. */
692 /* Do not inherit to_download_trace_state_variable. */
693 /* Do not inherit to_enable_tracepoint. */
694 /* Do not inherit to_disable_tracepoint. */
695 /* Do not inherit to_trace_set_readonly_regions. */
696 /* Do not inherit to_trace_start. */
697 /* Do not inherit to_get_trace_status. */
698 /* Do not inherit to_get_tracepoint_status. */
699 /* Do not inherit to_trace_stop. */
700 /* Do not inherit to_trace_find. */
701 /* Do not inherit to_get_trace_state_variable_value. */
702 /* Do not inherit to_save_trace_data. */
703 /* Do not inherit to_upload_tracepoints. */
704 /* Do not inherit to_upload_trace_state_variables. */
705 /* Do not inherit to_get_raw_trace_data. */
706 /* Do not inherit to_get_min_fast_tracepoint_insn_len. */
707 /* Do not inherit to_set_disconnected_tracing. */
708 INHERIT (to_set_circular_trace_buffer
, t
);
709 INHERIT (to_set_trace_buffer_size
, t
);
710 INHERIT (to_set_trace_notes
, t
);
711 INHERIT (to_get_tib_address
, t
);
712 INHERIT (to_set_permissions
, t
);
713 INHERIT (to_static_tracepoint_marker_at
, t
);
714 INHERIT (to_static_tracepoint_markers_by_strid
, t
);
715 INHERIT (to_traceframe_info
, t
);
716 INHERIT (to_use_agent
, t
);
717 INHERIT (to_can_use_agent
, t
);
718 INHERIT (to_augmented_libraries_svr4_read
, t
);
719 INHERIT (to_magic
, t
);
720 INHERIT (to_supports_evaluation_of_breakpoint_conditions
, t
);
721 INHERIT (to_can_run_breakpoint_commands
, t
);
722 /* Do not inherit to_memory_map. */
723 /* Do not inherit to_flash_erase. */
724 /* Do not inherit to_flash_done. */
728 /* Clean up a target struct so it no longer has any zero pointers in
729 it. Some entries are defaulted to a method that print an error,
730 others are hard-wired to a standard recursive default. */
732 #define de_fault(field, value) \
733 if (!current_target.field) \
734 current_target.field = value
737 (void (*) (char *, int))
740 (void (*) (struct target_ops
*))
742 de_fault (deprecated_xfer_memory
,
743 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
744 struct mem_attrib
*, struct target_ops
*))
746 de_fault (to_can_run
,
747 (int (*) (struct target_ops
*))
750 (void (*) (struct target_ops
*, ptid_t
))
752 current_target
.to_read_description
= NULL
;
753 de_fault (to_set_circular_trace_buffer
,
754 (void (*) (struct target_ops
*, int))
756 de_fault (to_set_trace_buffer_size
,
757 (void (*) (struct target_ops
*, LONGEST
))
759 de_fault (to_set_trace_notes
,
760 (int (*) (struct target_ops
*,
761 const char *, const char *, const char *))
763 de_fault (to_get_tib_address
,
764 (int (*) (struct target_ops
*, ptid_t
, CORE_ADDR
*))
766 de_fault (to_set_permissions
,
767 (void (*) (struct target_ops
*))
769 de_fault (to_static_tracepoint_marker_at
,
770 (int (*) (struct target_ops
*,
771 CORE_ADDR
, struct static_tracepoint_marker
*))
773 de_fault (to_static_tracepoint_markers_by_strid
,
774 (VEC(static_tracepoint_marker_p
) * (*) (struct target_ops
*,
777 de_fault (to_traceframe_info
,
778 (struct traceframe_info
* (*) (struct target_ops
*))
780 de_fault (to_supports_evaluation_of_breakpoint_conditions
,
781 (int (*) (struct target_ops
*))
783 de_fault (to_can_run_breakpoint_commands
,
784 (int (*) (struct target_ops
*))
786 de_fault (to_use_agent
,
787 (int (*) (struct target_ops
*, int))
789 de_fault (to_can_use_agent
,
790 (int (*) (struct target_ops
*))
792 de_fault (to_augmented_libraries_svr4_read
,
793 (int (*) (struct target_ops
*))
798 /* Finally, position the target-stack beneath the squashed
799 "current_target". That way code looking for a non-inherited
800 target method can quickly and simply find it. */
801 current_target
.beneath
= target_stack
;
804 setup_target_debug ();
807 /* Push a new target type into the stack of the existing target accessors,
808 possibly superseding some of the existing accessors.
810 Rather than allow an empty stack, we always have the dummy target at
811 the bottom stratum, so we can call the function vectors without
815 push_target (struct target_ops
*t
)
817 struct target_ops
**cur
;
819 /* Check magic number. If wrong, it probably means someone changed
820 the struct definition, but not all the places that initialize one. */
821 if (t
->to_magic
!= OPS_MAGIC
)
823 fprintf_unfiltered (gdb_stderr
,
824 "Magic number of %s target struct wrong\n",
826 internal_error (__FILE__
, __LINE__
,
827 _("failed internal consistency check"));
830 /* Find the proper stratum to install this target in. */
831 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
833 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
837 /* If there's already targets at this stratum, remove them. */
838 /* FIXME: cagney/2003-10-15: I think this should be popping all
839 targets to CUR, and not just those at this stratum level. */
840 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
842 /* There's already something at this stratum level. Close it,
843 and un-hook it from the stack. */
844 struct target_ops
*tmp
= (*cur
);
846 (*cur
) = (*cur
)->beneath
;
851 /* We have removed all targets in our stratum, now add the new one. */
855 update_current_target ();
858 /* Remove a target_ops vector from the stack, wherever it may be.
859 Return how many times it was removed (0 or 1). */
862 unpush_target (struct target_ops
*t
)
864 struct target_ops
**cur
;
865 struct target_ops
*tmp
;
867 if (t
->to_stratum
== dummy_stratum
)
868 internal_error (__FILE__
, __LINE__
,
869 _("Attempt to unpush the dummy target"));
871 /* Look for the specified target. Note that we assume that a target
872 can only occur once in the target stack. */
874 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
880 /* If we don't find target_ops, quit. Only open targets should be
885 /* Unchain the target. */
887 (*cur
) = (*cur
)->beneath
;
890 update_current_target ();
892 /* Finally close the target. Note we do this after unchaining, so
893 any target method calls from within the target_close
894 implementation don't end up in T anymore. */
901 pop_all_targets_above (enum strata above_stratum
)
903 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
905 if (!unpush_target (target_stack
))
907 fprintf_unfiltered (gdb_stderr
,
908 "pop_all_targets couldn't find target %s\n",
909 target_stack
->to_shortname
);
910 internal_error (__FILE__
, __LINE__
,
911 _("failed internal consistency check"));
918 pop_all_targets (void)
920 pop_all_targets_above (dummy_stratum
);
923 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
926 target_is_pushed (struct target_ops
*t
)
928 struct target_ops
**cur
;
930 /* Check magic number. If wrong, it probably means someone changed
931 the struct definition, but not all the places that initialize one. */
932 if (t
->to_magic
!= OPS_MAGIC
)
934 fprintf_unfiltered (gdb_stderr
,
935 "Magic number of %s target struct wrong\n",
937 internal_error (__FILE__
, __LINE__
,
938 _("failed internal consistency check"));
941 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
948 /* Using the objfile specified in OBJFILE, find the address for the
949 current thread's thread-local storage with offset OFFSET. */
951 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
953 volatile CORE_ADDR addr
= 0;
954 struct target_ops
*target
;
956 for (target
= current_target
.beneath
;
958 target
= target
->beneath
)
960 if (target
->to_get_thread_local_address
!= NULL
)
965 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
967 ptid_t ptid
= inferior_ptid
;
968 volatile struct gdb_exception ex
;
970 TRY_CATCH (ex
, RETURN_MASK_ALL
)
974 /* Fetch the load module address for this objfile. */
975 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
977 /* If it's 0, throw the appropriate exception. */
979 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
980 _("TLS load module not found"));
982 addr
= target
->to_get_thread_local_address (target
, ptid
,
985 /* If an error occurred, print TLS related messages here. Otherwise,
986 throw the error to some higher catcher. */
989 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
993 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
994 error (_("Cannot find thread-local variables "
995 "in this thread library."));
997 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
998 if (objfile_is_library
)
999 error (_("Cannot find shared library `%s' in dynamic"
1000 " linker's load module list"), objfile_name (objfile
));
1002 error (_("Cannot find executable file `%s' in dynamic"
1003 " linker's load module list"), objfile_name (objfile
));
1005 case TLS_NOT_ALLOCATED_YET_ERROR
:
1006 if (objfile_is_library
)
1007 error (_("The inferior has not yet allocated storage for"
1008 " thread-local variables in\n"
1009 "the shared library `%s'\n"
1011 objfile_name (objfile
), target_pid_to_str (ptid
));
1013 error (_("The inferior has not yet allocated storage for"
1014 " thread-local variables in\n"
1015 "the executable `%s'\n"
1017 objfile_name (objfile
), target_pid_to_str (ptid
));
1019 case TLS_GENERIC_ERROR
:
1020 if (objfile_is_library
)
1021 error (_("Cannot find thread-local storage for %s, "
1022 "shared library %s:\n%s"),
1023 target_pid_to_str (ptid
),
1024 objfile_name (objfile
), ex
.message
);
1026 error (_("Cannot find thread-local storage for %s, "
1027 "executable file %s:\n%s"),
1028 target_pid_to_str (ptid
),
1029 objfile_name (objfile
), ex
.message
);
1032 throw_exception (ex
);
1037 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1038 TLS is an ABI-specific thing. But we don't do that yet. */
1040 error (_("Cannot find thread-local variables on this target"));
1046 target_xfer_status_to_string (enum target_xfer_status err
)
1048 #define CASE(X) case X: return #X
1051 CASE(TARGET_XFER_E_IO
);
1052 CASE(TARGET_XFER_E_UNAVAILABLE
);
1061 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1063 /* target_read_string -- read a null terminated string, up to LEN bytes,
1064 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1065 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1066 is responsible for freeing it. Return the number of bytes successfully
1070 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1072 int tlen
, offset
, i
;
1076 int buffer_allocated
;
1078 unsigned int nbytes_read
= 0;
1080 gdb_assert (string
);
1082 /* Small for testing. */
1083 buffer_allocated
= 4;
1084 buffer
= xmalloc (buffer_allocated
);
1089 tlen
= MIN (len
, 4 - (memaddr
& 3));
1090 offset
= memaddr
& 3;
1092 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1095 /* The transfer request might have crossed the boundary to an
1096 unallocated region of memory. Retry the transfer, requesting
1100 errcode
= target_read_memory (memaddr
, buf
, 1);
1105 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1109 bytes
= bufptr
- buffer
;
1110 buffer_allocated
*= 2;
1111 buffer
= xrealloc (buffer
, buffer_allocated
);
1112 bufptr
= buffer
+ bytes
;
1115 for (i
= 0; i
< tlen
; i
++)
1117 *bufptr
++ = buf
[i
+ offset
];
1118 if (buf
[i
+ offset
] == '\000')
1120 nbytes_read
+= i
+ 1;
1127 nbytes_read
+= tlen
;
1136 struct target_section_table
*
1137 target_get_section_table (struct target_ops
*target
)
1139 struct target_ops
*t
;
1142 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1144 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
1145 if (t
->to_get_section_table
!= NULL
)
1146 return (*t
->to_get_section_table
) (t
);
1151 /* Find a section containing ADDR. */
1153 struct target_section
*
1154 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1156 struct target_section_table
*table
= target_get_section_table (target
);
1157 struct target_section
*secp
;
1162 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1164 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1170 /* Read memory from the live target, even if currently inspecting a
1171 traceframe. The return is the same as that of target_read. */
1173 static enum target_xfer_status
1174 target_read_live_memory (enum target_object object
,
1175 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1176 ULONGEST
*xfered_len
)
1178 enum target_xfer_status ret
;
1179 struct cleanup
*cleanup
;
1181 /* Switch momentarily out of tfind mode so to access live memory.
1182 Note that this must not clear global state, such as the frame
1183 cache, which must still remain valid for the previous traceframe.
1184 We may be _building_ the frame cache at this point. */
1185 cleanup
= make_cleanup_restore_traceframe_number ();
1186 set_traceframe_number (-1);
1188 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1189 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1191 do_cleanups (cleanup
);
1195 /* Using the set of read-only target sections of OPS, read live
1196 read-only memory. Note that the actual reads start from the
1197 top-most target again.
1199 For interface/parameters/return description see target.h,
1202 static enum target_xfer_status
1203 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1204 enum target_object object
,
1205 gdb_byte
*readbuf
, ULONGEST memaddr
,
1206 ULONGEST len
, ULONGEST
*xfered_len
)
1208 struct target_section
*secp
;
1209 struct target_section_table
*table
;
1211 secp
= target_section_by_addr (ops
, memaddr
);
1213 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1214 secp
->the_bfd_section
)
1217 struct target_section
*p
;
1218 ULONGEST memend
= memaddr
+ len
;
1220 table
= target_get_section_table (ops
);
1222 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1224 if (memaddr
>= p
->addr
)
1226 if (memend
<= p
->endaddr
)
1228 /* Entire transfer is within this section. */
1229 return target_read_live_memory (object
, memaddr
,
1230 readbuf
, len
, xfered_len
);
1232 else if (memaddr
>= p
->endaddr
)
1234 /* This section ends before the transfer starts. */
1239 /* This section overlaps the transfer. Just do half. */
1240 len
= p
->endaddr
- memaddr
;
1241 return target_read_live_memory (object
, memaddr
,
1242 readbuf
, len
, xfered_len
);
1248 return TARGET_XFER_EOF
;
1251 /* Read memory from more than one valid target. A core file, for
1252 instance, could have some of memory but delegate other bits to
1253 the target below it. So, we must manually try all targets. */
1255 static enum target_xfer_status
1256 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1257 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1258 ULONGEST
*xfered_len
)
1260 enum target_xfer_status res
;
1264 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1265 readbuf
, writebuf
, memaddr
, len
,
1267 if (res
== TARGET_XFER_OK
)
1270 /* Stop if the target reports that the memory is not available. */
1271 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1274 /* We want to continue past core files to executables, but not
1275 past a running target's memory. */
1276 if (ops
->to_has_all_memory (ops
))
1281 while (ops
!= NULL
);
1286 /* Perform a partial memory transfer.
1287 For docs see target.h, to_xfer_partial. */
1289 static enum target_xfer_status
1290 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1291 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1292 ULONGEST len
, ULONGEST
*xfered_len
)
1294 enum target_xfer_status res
;
1296 struct mem_region
*region
;
1297 struct inferior
*inf
;
1299 /* For accesses to unmapped overlay sections, read directly from
1300 files. Must do this first, as MEMADDR may need adjustment. */
1301 if (readbuf
!= NULL
&& overlay_debugging
)
1303 struct obj_section
*section
= find_pc_overlay (memaddr
);
1305 if (pc_in_unmapped_range (memaddr
, section
))
1307 struct target_section_table
*table
1308 = target_get_section_table (ops
);
1309 const char *section_name
= section
->the_bfd_section
->name
;
1311 memaddr
= overlay_mapped_address (memaddr
, section
);
1312 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1313 memaddr
, len
, xfered_len
,
1315 table
->sections_end
,
1320 /* Try the executable files, if "trust-readonly-sections" is set. */
1321 if (readbuf
!= NULL
&& trust_readonly
)
1323 struct target_section
*secp
;
1324 struct target_section_table
*table
;
1326 secp
= target_section_by_addr (ops
, memaddr
);
1328 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1329 secp
->the_bfd_section
)
1332 table
= target_get_section_table (ops
);
1333 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1334 memaddr
, len
, xfered_len
,
1336 table
->sections_end
,
1341 /* If reading unavailable memory in the context of traceframes, and
1342 this address falls within a read-only section, fallback to
1343 reading from live memory. */
1344 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1346 VEC(mem_range_s
) *available
;
1348 /* If we fail to get the set of available memory, then the
1349 target does not support querying traceframe info, and so we
1350 attempt reading from the traceframe anyway (assuming the
1351 target implements the old QTro packet then). */
1352 if (traceframe_available_memory (&available
, memaddr
, len
))
1354 struct cleanup
*old_chain
;
1356 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1358 if (VEC_empty (mem_range_s
, available
)
1359 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1361 /* Don't read into the traceframe's available
1363 if (!VEC_empty (mem_range_s
, available
))
1365 LONGEST oldlen
= len
;
1367 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1368 gdb_assert (len
<= oldlen
);
1371 do_cleanups (old_chain
);
1373 /* This goes through the topmost target again. */
1374 res
= memory_xfer_live_readonly_partial (ops
, object
,
1377 if (res
== TARGET_XFER_OK
)
1378 return TARGET_XFER_OK
;
1381 /* No use trying further, we know some memory starting
1382 at MEMADDR isn't available. */
1384 return TARGET_XFER_E_UNAVAILABLE
;
1388 /* Don't try to read more than how much is available, in
1389 case the target implements the deprecated QTro packet to
1390 cater for older GDBs (the target's knowledge of read-only
1391 sections may be outdated by now). */
1392 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1394 do_cleanups (old_chain
);
1398 /* Try GDB's internal data cache. */
1399 region
= lookup_mem_region (memaddr
);
1400 /* region->hi == 0 means there's no upper bound. */
1401 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1404 reg_len
= region
->hi
- memaddr
;
1406 switch (region
->attrib
.mode
)
1409 if (writebuf
!= NULL
)
1410 return TARGET_XFER_E_IO
;
1414 if (readbuf
!= NULL
)
1415 return TARGET_XFER_E_IO
;
1419 /* We only support writing to flash during "load" for now. */
1420 if (writebuf
!= NULL
)
1421 error (_("Writing to flash memory forbidden in this context"));
1425 return TARGET_XFER_E_IO
;
1428 if (!ptid_equal (inferior_ptid
, null_ptid
))
1429 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1434 /* The dcache reads whole cache lines; that doesn't play well
1435 with reading from a trace buffer, because reading outside of
1436 the collected memory range fails. */
1437 && get_traceframe_number () == -1
1438 && (region
->attrib
.cache
1439 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1440 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1442 DCACHE
*dcache
= target_dcache_get_or_init ();
1445 if (readbuf
!= NULL
)
1446 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1448 /* FIXME drow/2006-08-09: If we're going to preserve const
1449 correctness dcache_xfer_memory should take readbuf and
1451 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1454 return TARGET_XFER_E_IO
;
1457 *xfered_len
= (ULONGEST
) l
;
1458 return TARGET_XFER_OK
;
1462 /* If none of those methods found the memory we wanted, fall back
1463 to a target partial transfer. Normally a single call to
1464 to_xfer_partial is enough; if it doesn't recognize an object
1465 it will call the to_xfer_partial of the next target down.
1466 But for memory this won't do. Memory is the only target
1467 object which can be read from more than one valid target.
1468 A core file, for instance, could have some of memory but
1469 delegate other bits to the target below it. So, we must
1470 manually try all targets. */
1472 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1475 /* Make sure the cache gets updated no matter what - if we are writing
1476 to the stack. Even if this write is not tagged as such, we still need
1477 to update the cache. */
1479 if (res
== TARGET_XFER_OK
1482 && target_dcache_init_p ()
1483 && !region
->attrib
.cache
1484 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1485 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1487 DCACHE
*dcache
= target_dcache_get ();
1489 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1492 /* If we still haven't got anything, return the last error. We
1497 /* Perform a partial memory transfer. For docs see target.h,
1500 static enum target_xfer_status
1501 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1502 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1503 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1505 enum target_xfer_status res
;
1507 /* Zero length requests are ok and require no work. */
1509 return TARGET_XFER_EOF
;
1511 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1512 breakpoint insns, thus hiding out from higher layers whether
1513 there are software breakpoints inserted in the code stream. */
1514 if (readbuf
!= NULL
)
1516 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1519 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1520 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1525 struct cleanup
*old_chain
;
1527 /* A large write request is likely to be partially satisfied
1528 by memory_xfer_partial_1. We will continually malloc
1529 and free a copy of the entire write request for breakpoint
1530 shadow handling even though we only end up writing a small
1531 subset of it. Cap writes to 4KB to mitigate this. */
1532 len
= min (4096, len
);
1534 buf
= xmalloc (len
);
1535 old_chain
= make_cleanup (xfree
, buf
);
1536 memcpy (buf
, writebuf
, len
);
1538 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1539 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1542 do_cleanups (old_chain
);
1549 restore_show_memory_breakpoints (void *arg
)
1551 show_memory_breakpoints
= (uintptr_t) arg
;
1555 make_show_memory_breakpoints_cleanup (int show
)
1557 int current
= show_memory_breakpoints
;
1559 show_memory_breakpoints
= show
;
1560 return make_cleanup (restore_show_memory_breakpoints
,
1561 (void *) (uintptr_t) current
);
1564 /* For docs see target.h, to_xfer_partial. */
1566 enum target_xfer_status
1567 target_xfer_partial (struct target_ops
*ops
,
1568 enum target_object object
, const char *annex
,
1569 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1570 ULONGEST offset
, ULONGEST len
,
1571 ULONGEST
*xfered_len
)
1573 enum target_xfer_status retval
;
1575 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1577 /* Transfer is done when LEN is zero. */
1579 return TARGET_XFER_EOF
;
1581 if (writebuf
&& !may_write_memory
)
1582 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1583 core_addr_to_string_nz (offset
), plongest (len
));
1587 /* If this is a memory transfer, let the memory-specific code
1588 have a look at it instead. Memory transfers are more
1590 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1591 || object
== TARGET_OBJECT_CODE_MEMORY
)
1592 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1593 writebuf
, offset
, len
, xfered_len
);
1594 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1596 /* Request the normal memory object from other layers. */
1597 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1601 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1602 writebuf
, offset
, len
, xfered_len
);
1606 const unsigned char *myaddr
= NULL
;
1608 fprintf_unfiltered (gdb_stdlog
,
1609 "%s:target_xfer_partial "
1610 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1613 (annex
? annex
: "(null)"),
1614 host_address_to_string (readbuf
),
1615 host_address_to_string (writebuf
),
1616 core_addr_to_string_nz (offset
),
1617 pulongest (len
), retval
,
1618 pulongest (*xfered_len
));
1624 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1628 fputs_unfiltered (", bytes =", gdb_stdlog
);
1629 for (i
= 0; i
< *xfered_len
; i
++)
1631 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1633 if (targetdebug
< 2 && i
> 0)
1635 fprintf_unfiltered (gdb_stdlog
, " ...");
1638 fprintf_unfiltered (gdb_stdlog
, "\n");
1641 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1645 fputc_unfiltered ('\n', gdb_stdlog
);
1648 /* Check implementations of to_xfer_partial update *XFERED_LEN
1649 properly. Do assertion after printing debug messages, so that we
1650 can find more clues on assertion failure from debugging messages. */
1651 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1652 gdb_assert (*xfered_len
> 0);
1657 /* Read LEN bytes of target memory at address MEMADDR, placing the
1658 results in GDB's memory at MYADDR. Returns either 0 for success or
1659 TARGET_XFER_E_IO if any error occurs.
1661 If an error occurs, no guarantee is made about the contents of the data at
1662 MYADDR. In particular, the caller should not depend upon partial reads
1663 filling the buffer with good data. There is no way for the caller to know
1664 how much good data might have been transfered anyway. Callers that can
1665 deal with partial reads should call target_read (which will retry until
1666 it makes no progress, and then return how much was transferred). */
1669 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1671 /* Dispatch to the topmost target, not the flattened current_target.
1672 Memory accesses check target->to_has_(all_)memory, and the
1673 flattened target doesn't inherit those. */
1674 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1675 myaddr
, memaddr
, len
) == len
)
1678 return TARGET_XFER_E_IO
;
1681 /* Like target_read_memory, but specify explicitly that this is a read
1682 from the target's raw memory. That is, this read bypasses the
1683 dcache, breakpoint shadowing, etc. */
1686 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1688 /* See comment in target_read_memory about why the request starts at
1689 current_target.beneath. */
1690 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1691 myaddr
, memaddr
, len
) == len
)
1694 return TARGET_XFER_E_IO
;
1697 /* Like target_read_memory, but specify explicitly that this is a read from
1698 the target's stack. This may trigger different cache behavior. */
1701 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1703 /* See comment in target_read_memory about why the request starts at
1704 current_target.beneath. */
1705 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1706 myaddr
, memaddr
, len
) == len
)
1709 return TARGET_XFER_E_IO
;
1712 /* Like target_read_memory, but specify explicitly that this is a read from
1713 the target's code. This may trigger different cache behavior. */
1716 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1718 /* See comment in target_read_memory about why the request starts at
1719 current_target.beneath. */
1720 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1721 myaddr
, memaddr
, len
) == len
)
1724 return TARGET_XFER_E_IO
;
1727 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1728 Returns either 0 for success or TARGET_XFER_E_IO if any
1729 error occurs. If an error occurs, no guarantee is made about how
1730 much data got written. Callers that can deal with partial writes
1731 should call target_write. */
1734 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1736 /* See comment in target_read_memory about why the request starts at
1737 current_target.beneath. */
1738 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1739 myaddr
, memaddr
, len
) == len
)
1742 return TARGET_XFER_E_IO
;
1745 /* Write LEN bytes from MYADDR to target raw memory at address
1746 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1747 if any error occurs. If an error occurs, no guarantee is made
1748 about how much data got written. Callers that can deal with
1749 partial writes should call target_write. */
1752 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1754 /* See comment in target_read_memory about why the request starts at
1755 current_target.beneath. */
1756 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1757 myaddr
, memaddr
, len
) == len
)
1760 return TARGET_XFER_E_IO
;
1763 /* Fetch the target's memory map. */
1766 target_memory_map (void)
1768 VEC(mem_region_s
) *result
;
1769 struct mem_region
*last_one
, *this_one
;
1771 struct target_ops
*t
;
1774 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1776 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1777 if (t
->to_memory_map
!= NULL
)
1783 result
= t
->to_memory_map (t
);
1787 qsort (VEC_address (mem_region_s
, result
),
1788 VEC_length (mem_region_s
, result
),
1789 sizeof (struct mem_region
), mem_region_cmp
);
1791 /* Check that regions do not overlap. Simultaneously assign
1792 a numbering for the "mem" commands to use to refer to
1795 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1797 this_one
->number
= ix
;
1799 if (last_one
&& last_one
->hi
> this_one
->lo
)
1801 warning (_("Overlapping regions in memory map: ignoring"));
1802 VEC_free (mem_region_s
, result
);
1805 last_one
= this_one
;
1812 target_flash_erase (ULONGEST address
, LONGEST length
)
1814 struct target_ops
*t
;
1816 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1817 if (t
->to_flash_erase
!= NULL
)
1820 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1821 hex_string (address
), phex (length
, 0));
1822 t
->to_flash_erase (t
, address
, length
);
1830 target_flash_done (void)
1832 struct target_ops
*t
;
1834 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
1835 if (t
->to_flash_done
!= NULL
)
1838 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1839 t
->to_flash_done (t
);
/* "show trust-readonly-sections" callback: print the current mode.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1855 /* More generic transfers. */
1857 static enum target_xfer_status
1858 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1859 const char *annex
, gdb_byte
*readbuf
,
1860 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1861 ULONGEST
*xfered_len
)
1863 if (object
== TARGET_OBJECT_MEMORY
1864 && ops
->deprecated_xfer_memory
!= NULL
)
1865 /* If available, fall back to the target's
1866 "deprecated_xfer_memory" method. */
1871 if (writebuf
!= NULL
)
1873 void *buffer
= xmalloc (len
);
1874 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1876 memcpy (buffer
, writebuf
, len
);
1877 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1878 1/*write*/, NULL
, ops
);
1879 do_cleanups (cleanup
);
1881 if (readbuf
!= NULL
)
1882 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1883 0/*read*/, NULL
, ops
);
1886 *xfered_len
= (ULONGEST
) xfered
;
1887 return TARGET_XFER_E_IO
;
1889 else if (xfered
== 0 && errno
== 0)
1890 /* "deprecated_xfer_memory" uses 0, cross checked against
1891 ERRNO as one indication of an error. */
1892 return TARGET_XFER_EOF
;
1894 return TARGET_XFER_E_IO
;
1898 gdb_assert (ops
->beneath
!= NULL
);
1899 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1900 readbuf
, writebuf
, offset
, len
,
1905 /* Target vector read/write partial wrapper functions. */
1907 static enum target_xfer_status
1908 target_read_partial (struct target_ops
*ops
,
1909 enum target_object object
,
1910 const char *annex
, gdb_byte
*buf
,
1911 ULONGEST offset
, ULONGEST len
,
1912 ULONGEST
*xfered_len
)
1914 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1918 static enum target_xfer_status
1919 target_write_partial (struct target_ops
*ops
,
1920 enum target_object object
,
1921 const char *annex
, const gdb_byte
*buf
,
1922 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1924 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1928 /* Wrappers to perform the full transfer. */
1930 /* For docs on target_read see target.h. */
1933 target_read (struct target_ops
*ops
,
1934 enum target_object object
,
1935 const char *annex
, gdb_byte
*buf
,
1936 ULONGEST offset
, LONGEST len
)
1940 while (xfered
< len
)
1942 ULONGEST xfered_len
;
1943 enum target_xfer_status status
;
1945 status
= target_read_partial (ops
, object
, annex
,
1946 (gdb_byte
*) buf
+ xfered
,
1947 offset
+ xfered
, len
- xfered
,
1950 /* Call an observer, notifying them of the xfer progress? */
1951 if (status
== TARGET_XFER_EOF
)
1953 else if (status
== TARGET_XFER_OK
)
1955 xfered
+= xfered_len
;
1965 /* Assuming that the entire [begin, end) range of memory cannot be
1966 read, try to read whatever subrange is possible to read.
1968 The function returns, in RESULT, either zero or one memory block.
1969 If there's a readable subrange at the beginning, it is completely
1970 read and returned. Any further readable subrange will not be read.
1971 Otherwise, if there's a readable subrange at the end, it will be
1972 completely read and returned. Any readable subranges before it
1973 (obviously, not starting at the beginning), will be ignored. In
1974 other cases -- either no readable subrange, or readable subrange(s)
1975 that is neither at the beginning, or end, nothing is returned.
1977 The purpose of this function is to handle a read across a boundary
1978 of accessible memory in a case when memory map is not available.
1979 The above restrictions are fine for this case, but will give
1980 incorrect results if the memory is 'patchy'. However, supporting
1981 'patchy' memory would require trying to read every single byte,
1982 and it seems unacceptable solution. Explicit memory map is
1983 recommended for this case -- and target_read_memory_robust will
1984 take care of reading multiple ranges then. */
1987 read_whatever_is_readable (struct target_ops
*ops
,
1988 ULONGEST begin
, ULONGEST end
,
1989 VEC(memory_read_result_s
) **result
)
1991 gdb_byte
*buf
= xmalloc (end
- begin
);
1992 ULONGEST current_begin
= begin
;
1993 ULONGEST current_end
= end
;
1995 memory_read_result_s r
;
1996 ULONGEST xfered_len
;
1998 /* If we previously failed to read 1 byte, nothing can be done here. */
1999 if (end
- begin
<= 1)
2005 /* Check that either first or the last byte is readable, and give up
2006 if not. This heuristic is meant to permit reading accessible memory
2007 at the boundary of accessible region. */
2008 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2009 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
2014 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2015 buf
+ (end
-begin
) - 1, end
- 1, 1,
2016 &xfered_len
) == TARGET_XFER_OK
)
2027 /* Loop invariant is that the [current_begin, current_end) was previously
2028 found to be not readable as a whole.
2030 Note loop condition -- if the range has 1 byte, we can't divide the range
2031 so there's no point trying further. */
2032 while (current_end
- current_begin
> 1)
2034 ULONGEST first_half_begin
, first_half_end
;
2035 ULONGEST second_half_begin
, second_half_end
;
2037 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
2041 first_half_begin
= current_begin
;
2042 first_half_end
= middle
;
2043 second_half_begin
= middle
;
2044 second_half_end
= current_end
;
2048 first_half_begin
= middle
;
2049 first_half_end
= current_end
;
2050 second_half_begin
= current_begin
;
2051 second_half_end
= middle
;
2054 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2055 buf
+ (first_half_begin
- begin
),
2057 first_half_end
- first_half_begin
);
2059 if (xfer
== first_half_end
- first_half_begin
)
2061 /* This half reads up fine. So, the error must be in the
2063 current_begin
= second_half_begin
;
2064 current_end
= second_half_end
;
2068 /* This half is not readable. Because we've tried one byte, we
2069 know some part of this half if actually redable. Go to the next
2070 iteration to divide again and try to read.
2072 We don't handle the other half, because this function only tries
2073 to read a single readable subrange. */
2074 current_begin
= first_half_begin
;
2075 current_end
= first_half_end
;
2081 /* The [begin, current_begin) range has been read. */
2083 r
.end
= current_begin
;
2088 /* The [current_end, end) range has been read. */
2089 LONGEST rlen
= end
- current_end
;
2091 r
.data
= xmalloc (rlen
);
2092 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2093 r
.begin
= current_end
;
2097 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2101 free_memory_read_result_vector (void *x
)
2103 VEC(memory_read_result_s
) *v
= x
;
2104 memory_read_result_s
*current
;
2107 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2109 xfree (current
->data
);
2111 VEC_free (memory_read_result_s
, v
);
2114 VEC(memory_read_result_s
) *
2115 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2117 VEC(memory_read_result_s
) *result
= 0;
2120 while (xfered
< len
)
2122 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2125 /* If there is no explicit region, a fake one should be created. */
2126 gdb_assert (region
);
2128 if (region
->hi
== 0)
2129 rlen
= len
- xfered
;
2131 rlen
= region
->hi
- offset
;
2133 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2135 /* Cannot read this region. Note that we can end up here only
2136 if the region is explicitly marked inaccessible, or
2137 'inaccessible-by-default' is in effect. */
2142 LONGEST to_read
= min (len
- xfered
, rlen
);
2143 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2145 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2146 (gdb_byte
*) buffer
,
2147 offset
+ xfered
, to_read
);
2148 /* Call an observer, notifying them of the xfer progress? */
2151 /* Got an error reading full chunk. See if maybe we can read
2154 read_whatever_is_readable (ops
, offset
+ xfered
,
2155 offset
+ xfered
+ to_read
, &result
);
2160 struct memory_read_result r
;
2162 r
.begin
= offset
+ xfered
;
2163 r
.end
= r
.begin
+ xfer
;
2164 VEC_safe_push (memory_read_result_s
, result
, &r
);
2174 /* An alternative to target_write with progress callbacks. */
2177 target_write_with_progress (struct target_ops
*ops
,
2178 enum target_object object
,
2179 const char *annex
, const gdb_byte
*buf
,
2180 ULONGEST offset
, LONGEST len
,
2181 void (*progress
) (ULONGEST
, void *), void *baton
)
2185 /* Give the progress callback a chance to set up. */
2187 (*progress
) (0, baton
);
2189 while (xfered
< len
)
2191 ULONGEST xfered_len
;
2192 enum target_xfer_status status
;
2194 status
= target_write_partial (ops
, object
, annex
,
2195 (gdb_byte
*) buf
+ xfered
,
2196 offset
+ xfered
, len
- xfered
,
2199 if (status
== TARGET_XFER_EOF
)
2201 if (TARGET_XFER_STATUS_ERROR_P (status
))
2204 gdb_assert (status
== TARGET_XFER_OK
);
2206 (*progress
) (xfered_len
, baton
);
2208 xfered
+= xfered_len
;
2214 /* For docs on target_write see target.h. */
2217 target_write (struct target_ops
*ops
,
2218 enum target_object object
,
2219 const char *annex
, const gdb_byte
*buf
,
2220 ULONGEST offset
, LONGEST len
)
2222 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2226 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2227 the size of the transferred data. PADDING additional bytes are
2228 available in *BUF_P. This is a helper function for
2229 target_read_alloc; see the declaration of that function for more
2233 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2234 const char *annex
, gdb_byte
**buf_p
, int padding
)
2236 size_t buf_alloc
, buf_pos
;
2239 /* This function does not have a length parameter; it reads the
2240 entire OBJECT). Also, it doesn't support objects fetched partly
2241 from one target and partly from another (in a different stratum,
2242 e.g. a core file and an executable). Both reasons make it
2243 unsuitable for reading memory. */
2244 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2246 /* Start by reading up to 4K at a time. The target will throttle
2247 this number down if necessary. */
2249 buf
= xmalloc (buf_alloc
);
2253 ULONGEST xfered_len
;
2254 enum target_xfer_status status
;
2256 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2257 buf_pos
, buf_alloc
- buf_pos
- padding
,
2260 if (status
== TARGET_XFER_EOF
)
2262 /* Read all there was. */
2269 else if (status
!= TARGET_XFER_OK
)
2271 /* An error occurred. */
2273 return TARGET_XFER_E_IO
;
2276 buf_pos
+= xfered_len
;
2278 /* If the buffer is filling up, expand it. */
2279 if (buf_alloc
< buf_pos
* 2)
2282 buf
= xrealloc (buf
, buf_alloc
);
2289 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2290 the size of the transferred data. See the declaration in "target.h"
2291 function for more information about the return value. */
2294 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2295 const char *annex
, gdb_byte
**buf_p
)
2297 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2300 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2301 returned as a string, allocated using xmalloc. If an error occurs
2302 or the transfer is unsupported, NULL is returned. Empty objects
2303 are returned as allocated but empty strings. A warning is issued
2304 if the result contains any embedded NUL bytes. */
2307 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2312 LONGEST i
, transferred
;
2314 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2315 bufstr
= (char *) buffer
;
2317 if (transferred
< 0)
2320 if (transferred
== 0)
2321 return xstrdup ("");
2323 bufstr
[transferred
] = 0;
2325 /* Check for embedded NUL bytes; but allow trailing NULs. */
2326 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2329 warning (_("target object %d, annex %s, "
2330 "contained unexpected null characters"),
2331 (int) object
, annex
? annex
: "(none)");
2338 /* Memory transfer methods. */
2341 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2344 /* This method is used to read from an alternate, non-current
2345 target. This read must bypass the overlay support (as symbols
2346 don't match this target), and GDB's internal cache (wrong cache
2347 for this target). */
2348 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2350 memory_error (TARGET_XFER_E_IO
, addr
);
2354 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2355 int len
, enum bfd_endian byte_order
)
2357 gdb_byte buf
[sizeof (ULONGEST
)];
2359 gdb_assert (len
<= sizeof (buf
));
2360 get_target_memory (ops
, addr
, buf
, len
);
2361 return extract_unsigned_integer (buf
, len
, byte_order
);
2367 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2368 struct bp_target_info
*bp_tgt
)
2370 if (!may_insert_breakpoints
)
2372 warning (_("May not insert breakpoints"));
2376 return current_target
.to_insert_breakpoint (¤t_target
,
2383 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2384 struct bp_target_info
*bp_tgt
)
2386 /* This is kind of a weird case to handle, but the permission might
2387 have been changed after breakpoints were inserted - in which case
2388 we should just take the user literally and assume that any
2389 breakpoints should be left in place. */
2390 if (!may_insert_breakpoints
)
2392 warning (_("May not remove breakpoints"));
2396 return current_target
.to_remove_breakpoint (¤t_target
,
2401 target_info (char *args
, int from_tty
)
2403 struct target_ops
*t
;
2404 int has_all_mem
= 0;
2406 if (symfile_objfile
!= NULL
)
2407 printf_unfiltered (_("Symbols from \"%s\".\n"),
2408 objfile_name (symfile_objfile
));
2410 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2412 if (!(*t
->to_has_memory
) (t
))
2415 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2418 printf_unfiltered (_("\tWhile running this, "
2419 "GDB does not access memory from...\n"));
2420 printf_unfiltered ("%s:\n", t
->to_longname
);
2421 (t
->to_files_info
) (t
);
2422 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2426 /* This function is called before any new inferior is created, e.g.
2427 by running a program, attaching, or connecting to a target.
2428 It cleans up any state from previous invocations which might
2429 change between runs. This is a subset of what target_preopen
2430 resets (things which might change between targets). */
2433 target_pre_inferior (int from_tty
)
2435 /* Clear out solib state. Otherwise the solib state of the previous
2436 inferior might have survived and is entirely wrong for the new
2437 target. This has been observed on GNU/Linux using glibc 2.3. How
2449 Cannot access memory at address 0xdeadbeef
2452 /* In some OSs, the shared library list is the same/global/shared
2453 across inferiors. If code is shared between processes, so are
2454 memory regions and features. */
2455 if (!gdbarch_has_global_solist (target_gdbarch ()))
2457 no_shared_libraries (NULL
, from_tty
);
2459 invalidate_target_mem_regions ();
2461 target_clear_description ();
2464 agent_capability_invalidate ();
2467 /* Callback for iterate_over_inferiors. Gets rid of the given
2471 dispose_inferior (struct inferior
*inf
, void *args
)
2473 struct thread_info
*thread
;
2475 thread
= any_thread_of_process (inf
->pid
);
2478 switch_to_thread (thread
->ptid
);
2480 /* Core inferiors actually should be detached, not killed. */
2481 if (target_has_execution
)
2484 target_detach (NULL
, 0);
2490 /* This is to be called by the open routine before it does
2494 target_preopen (int from_tty
)
2498 if (have_inferiors ())
2501 || !have_live_inferiors ()
2502 || query (_("A program is being debugged already. Kill it? ")))
2503 iterate_over_inferiors (dispose_inferior
, NULL
);
2505 error (_("Program not killed."));
2508 /* Calling target_kill may remove the target from the stack. But if
2509 it doesn't (which seems like a win for UDI), remove it now. */
2510 /* Leave the exec target, though. The user may be switching from a
2511 live process to a core of the same program. */
2512 pop_all_targets_above (file_stratum
);
2514 target_pre_inferior (from_tty
);
2517 /* Detach a target after doing deferred register stores. */
2520 target_detach (const char *args
, int from_tty
)
2522 struct target_ops
* t
;
2524 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2525 /* Don't remove global breakpoints here. They're removed on
2526 disconnection from the target. */
2529 /* If we're in breakpoints-always-inserted mode, have to remove
2530 them before detaching. */
2531 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2533 prepare_for_detach ();
2535 current_target
.to_detach (¤t_target
, args
, from_tty
);
2537 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2542 target_disconnect (char *args
, int from_tty
)
2544 struct target_ops
*t
;
2546 /* If we're in breakpoints-always-inserted mode or if breakpoints
2547 are global across processes, we have to remove them before
2549 remove_breakpoints ();
2551 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2552 if (t
->to_disconnect
!= NULL
)
2555 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2557 t
->to_disconnect (t
, args
, from_tty
);
2565 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2567 struct target_ops
*t
;
2568 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2573 char *status_string
;
2574 char *options_string
;
2576 status_string
= target_waitstatus_to_string (status
);
2577 options_string
= target_options_to_string (options
);
2578 fprintf_unfiltered (gdb_stdlog
,
2579 "target_wait (%d, status, options={%s})"
2581 ptid_get_pid (ptid
), options_string
,
2582 ptid_get_pid (retval
), status_string
);
2583 xfree (status_string
);
2584 xfree (options_string
);
2591 target_pid_to_str (ptid_t ptid
)
2593 struct target_ops
*t
;
2595 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2597 if (t
->to_pid_to_str
!= NULL
)
2598 return (*t
->to_pid_to_str
) (t
, ptid
);
2601 return normal_pid_to_str (ptid
);
2605 target_thread_name (struct thread_info
*info
)
2607 return current_target
.to_thread_name (¤t_target
, info
);
2611 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2613 struct target_ops
*t
;
2615 target_dcache_invalidate ();
2617 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2619 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2620 ptid_get_pid (ptid
),
2621 step
? "step" : "continue",
2622 gdb_signal_to_name (signal
));
2624 registers_changed_ptid (ptid
);
2625 set_executing (ptid
, 1);
2626 set_running (ptid
, 1);
2627 clear_inline_frame_state (ptid
);
2631 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2633 struct target_ops
*t
;
2635 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2637 if (t
->to_pass_signals
!= NULL
)
2643 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2646 for (i
= 0; i
< numsigs
; i
++)
2647 if (pass_signals
[i
])
2648 fprintf_unfiltered (gdb_stdlog
, " %s",
2649 gdb_signal_to_name (i
));
2651 fprintf_unfiltered (gdb_stdlog
, " })\n");
2654 (*t
->to_pass_signals
) (t
, numsigs
, pass_signals
);
2661 target_program_signals (int numsigs
, unsigned char *program_signals
)
2663 struct target_ops
*t
;
2665 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2667 if (t
->to_program_signals
!= NULL
)
2673 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2676 for (i
= 0; i
< numsigs
; i
++)
2677 if (program_signals
[i
])
2678 fprintf_unfiltered (gdb_stdlog
, " %s",
2679 gdb_signal_to_name (i
));
2681 fprintf_unfiltered (gdb_stdlog
, " })\n");
2684 (*t
->to_program_signals
) (t
, numsigs
, program_signals
);
2690 /* Look through the list of possible targets for a target that can
2694 target_follow_fork (int follow_child
, int detach_fork
)
2696 struct target_ops
*t
;
2698 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2700 if (t
->to_follow_fork
!= NULL
)
2702 int retval
= t
->to_follow_fork (t
, follow_child
, detach_fork
);
2705 fprintf_unfiltered (gdb_stdlog
,
2706 "target_follow_fork (%d, %d) = %d\n",
2707 follow_child
, detach_fork
, retval
);
2712 /* Some target returned a fork event, but did not know how to follow it. */
2713 internal_error (__FILE__
, __LINE__
,
2714 _("could not find a target to follow fork"));
2718 target_mourn_inferior (void)
2720 struct target_ops
*t
;
2722 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2724 if (t
->to_mourn_inferior
!= NULL
)
2726 t
->to_mourn_inferior (t
);
2728 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2730 /* We no longer need to keep handles on any of the object files.
2731 Make sure to release them to avoid unnecessarily locking any
2732 of them while we're not actually debugging. */
2733 bfd_cache_close_all ();
2739 internal_error (__FILE__
, __LINE__
,
2740 _("could not find a target to follow mourn inferior"));
2743 /* Look for a target which can describe architectural features, starting
2744 from TARGET. If we find one, return its description. */
2746 const struct target_desc
*
2747 target_read_description (struct target_ops
*target
)
2749 struct target_ops
*t
;
2751 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2752 if (t
->to_read_description
!= NULL
)
2754 const struct target_desc
*tdesc
;
2756 tdesc
= t
->to_read_description (t
);
2764 /* The default implementation of to_search_memory.
2765 This implements a basic search of memory, reading target memory and
2766 performing the search here (as opposed to performing the search in on the
2767 target side with, for example, gdbserver). */
2770 simple_search_memory (struct target_ops
*ops
,
2771 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2772 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2773 CORE_ADDR
*found_addrp
)
2775 /* NOTE: also defined in find.c testcase. */
2776 #define SEARCH_CHUNK_SIZE 16000
2777 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2778 /* Buffer to hold memory contents for searching. */
2779 gdb_byte
*search_buf
;
2780 unsigned search_buf_size
;
2781 struct cleanup
*old_cleanups
;
2783 search_buf_size
= chunk_size
+ pattern_len
- 1;
2785 /* No point in trying to allocate a buffer larger than the search space. */
2786 if (search_space_len
< search_buf_size
)
2787 search_buf_size
= search_space_len
;
2789 search_buf
= malloc (search_buf_size
);
2790 if (search_buf
== NULL
)
2791 error (_("Unable to allocate memory to perform the search."));
2792 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2794 /* Prime the search buffer. */
2796 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2797 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2799 warning (_("Unable to access %s bytes of target "
2800 "memory at %s, halting search."),
2801 pulongest (search_buf_size
), hex_string (start_addr
));
2802 do_cleanups (old_cleanups
);
2806 /* Perform the search.
2808 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2809 When we've scanned N bytes we copy the trailing bytes to the start and
2810 read in another N bytes. */
2812 while (search_space_len
>= pattern_len
)
2814 gdb_byte
*found_ptr
;
2815 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2817 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2818 pattern
, pattern_len
);
2820 if (found_ptr
!= NULL
)
2822 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2824 *found_addrp
= found_addr
;
2825 do_cleanups (old_cleanups
);
2829 /* Not found in this chunk, skip to next chunk. */
2831 /* Don't let search_space_len wrap here, it's unsigned. */
2832 if (search_space_len
>= chunk_size
)
2833 search_space_len
-= chunk_size
;
2835 search_space_len
= 0;
2837 if (search_space_len
>= pattern_len
)
2839 unsigned keep_len
= search_buf_size
- chunk_size
;
2840 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2843 /* Copy the trailing part of the previous iteration to the front
2844 of the buffer for the next iteration. */
2845 gdb_assert (keep_len
== pattern_len
- 1);
2846 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2848 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2850 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2851 search_buf
+ keep_len
, read_addr
,
2852 nr_to_read
) != nr_to_read
)
2854 warning (_("Unable to access %s bytes of target "
2855 "memory at %s, halting search."),
2856 plongest (nr_to_read
),
2857 hex_string (read_addr
));
2858 do_cleanups (old_cleanups
);
2862 start_addr
+= chunk_size
;
2868 do_cleanups (old_cleanups
);
2872 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2873 sequence of bytes in PATTERN with length PATTERN_LEN.
2875 The result is 1 if found, 0 if not found, and -1 if there was an error
2876 requiring halting of the search (e.g. memory read error).
2877 If the pattern is found the address is recorded in FOUND_ADDRP. */
2880 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2881 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2882 CORE_ADDR
*found_addrp
)
2884 struct target_ops
*t
;
2887 /* We don't use INHERIT to set current_target.to_search_memory,
2888 so we have to scan the target stack and handle targetdebug
2892 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2893 hex_string (start_addr
));
2895 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2896 if (t
->to_search_memory
!= NULL
)
2901 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2902 pattern
, pattern_len
, found_addrp
);
2906 /* If a special version of to_search_memory isn't available, use the
2908 found
= simple_search_memory (current_target
.beneath
,
2909 start_addr
, search_space_len
,
2910 pattern
, pattern_len
, found_addrp
);
2914 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2919 /* Look through the currently pushed targets. If none of them will
2920 be able to restart the currently running process, issue an error
2924 target_require_runnable (void)
2926 struct target_ops
*t
;
2928 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2930 /* If this target knows how to create a new program, then
2931 assume we will still be able to after killing the current
2932 one. Either killing and mourning will not pop T, or else
2933 find_default_run_target will find it again. */
2934 if (t
->to_create_inferior
!= NULL
)
2937 /* Do not worry about thread_stratum targets that can not
2938 create inferiors. Assume they will be pushed again if
2939 necessary, and continue to the process_stratum. */
2940 if (t
->to_stratum
== thread_stratum
2941 || t
->to_stratum
== arch_stratum
)
2944 error (_("The \"%s\" target does not support \"run\". "
2945 "Try \"help target\" or \"continue\"."),
2949 /* This function is only called if the target is running. In that
2950 case there should have been a process_stratum target and it
2951 should either know how to create inferiors, or not... */
2952 internal_error (__FILE__
, __LINE__
, _("No targets found"));
2955 /* Look through the list of possible targets for a target that can
2956 execute a run or attach command without any other data. This is
2957 used to locate the default process stratum.
2959 If DO_MESG is not NULL, the result is always valid (error() is
2960 called for errors); else, return NULL on error. */
2962 static struct target_ops
*
2963 find_default_run_target (char *do_mesg
)
2965 struct target_ops
**t
;
2966 struct target_ops
*runable
= NULL
;
2971 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
2974 if ((*t
)->to_can_run
&& target_can_run (*t
))
2984 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
2993 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
2995 struct target_ops
*t
;
2997 t
= find_default_run_target ("attach");
2998 (t
->to_attach
) (t
, args
, from_tty
);
3003 find_default_create_inferior (struct target_ops
*ops
,
3004 char *exec_file
, char *allargs
, char **env
,
3007 struct target_ops
*t
;
3009 t
= find_default_run_target ("run");
3010 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
3015 find_default_can_async_p (struct target_ops
*ignore
)
3017 struct target_ops
*t
;
3019 /* This may be called before the target is pushed on the stack;
3020 look for the default process stratum. If there's none, gdb isn't
3021 configured with a native debugger, and target remote isn't
3023 t
= find_default_run_target (NULL
);
3024 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
3025 return (t
->to_can_async_p
) (t
);
3030 find_default_is_async_p (struct target_ops
*ignore
)
3032 struct target_ops
*t
;
3034 /* This may be called before the target is pushed on the stack;
3035 look for the default process stratum. If there's none, gdb isn't
3036 configured with a native debugger, and target remote isn't
3038 t
= find_default_run_target (NULL
);
3039 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
3040 return (t
->to_is_async_p
) (t
);
3045 find_default_supports_non_stop (struct target_ops
*self
)
3047 struct target_ops
*t
;
3049 t
= find_default_run_target (NULL
);
3050 if (t
&& t
->to_supports_non_stop
)
3051 return (t
->to_supports_non_stop
) (t
);
3056 target_supports_non_stop (void)
3058 struct target_ops
*t
;
3060 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3061 if (t
->to_supports_non_stop
)
3062 return t
->to_supports_non_stop (t
);
3067 /* Implement the "info proc" command. */
3070 target_info_proc (char *args
, enum info_proc_what what
)
3072 struct target_ops
*t
;
3074 /* If we're already connected to something that can get us OS
3075 related data, use it. Otherwise, try using the native
3077 if (current_target
.to_stratum
>= process_stratum
)
3078 t
= current_target
.beneath
;
3080 t
= find_default_run_target (NULL
);
3082 for (; t
!= NULL
; t
= t
->beneath
)
3084 if (t
->to_info_proc
!= NULL
)
3086 t
->to_info_proc (t
, args
, what
);
3089 fprintf_unfiltered (gdb_stdlog
,
3090 "target_info_proc (\"%s\", %d)\n", args
, what
);
3100 find_default_supports_disable_randomization (struct target_ops
*self
)
3102 struct target_ops
*t
;
3104 t
= find_default_run_target (NULL
);
3105 if (t
&& t
->to_supports_disable_randomization
)
3106 return (t
->to_supports_disable_randomization
) (t
);
3111 target_supports_disable_randomization (void)
3113 struct target_ops
*t
;
3115 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3116 if (t
->to_supports_disable_randomization
)
3117 return t
->to_supports_disable_randomization (t
);
3123 target_get_osdata (const char *type
)
3125 struct target_ops
*t
;
3127 /* If we're already connected to something that can get us OS
3128 related data, use it. Otherwise, try using the native
3130 if (current_target
.to_stratum
>= process_stratum
)
3131 t
= current_target
.beneath
;
3133 t
= find_default_run_target ("get OS data");
3138 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3141 /* Determine the current address space of thread PTID. */
3143 struct address_space
*
3144 target_thread_address_space (ptid_t ptid
)
3146 struct address_space
*aspace
;
3147 struct inferior
*inf
;
3148 struct target_ops
*t
;
3150 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3152 if (t
->to_thread_address_space
!= NULL
)
3154 aspace
= t
->to_thread_address_space (t
, ptid
);
3155 gdb_assert (aspace
);
3158 fprintf_unfiltered (gdb_stdlog
,
3159 "target_thread_address_space (%s) = %d\n",
3160 target_pid_to_str (ptid
),
3161 address_space_num (aspace
));
3166 /* Fall-back to the "main" address space of the inferior. */
3167 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3169 if (inf
== NULL
|| inf
->aspace
== NULL
)
3170 internal_error (__FILE__
, __LINE__
,
3171 _("Can't determine the current "
3172 "address space of thread %s\n"),
3173 target_pid_to_str (ptid
));
3179 /* Target file operations. */
3181 static struct target_ops
*
3182 default_fileio_target (void)
3184 /* If we're already connected to something that can perform
3185 file I/O, use it. Otherwise, try using the native target. */
3186 if (current_target
.to_stratum
>= process_stratum
)
3187 return current_target
.beneath
;
3189 return find_default_run_target ("file I/O");
3192 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3193 target file descriptor, or -1 if an error occurs (and set
3196 target_fileio_open (const char *filename
, int flags
, int mode
,
3199 struct target_ops
*t
;
3201 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3203 if (t
->to_fileio_open
!= NULL
)
3205 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3208 fprintf_unfiltered (gdb_stdlog
,
3209 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3210 filename
, flags
, mode
,
3211 fd
, fd
!= -1 ? 0 : *target_errno
);
3216 *target_errno
= FILEIO_ENOSYS
;
3220 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3221 Return the number of bytes written, or -1 if an error occurs
3222 (and set *TARGET_ERRNO). */
3224 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3225 ULONGEST offset
, int *target_errno
)
3227 struct target_ops
*t
;
3229 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3231 if (t
->to_fileio_pwrite
!= NULL
)
3233 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3237 fprintf_unfiltered (gdb_stdlog
,
3238 "target_fileio_pwrite (%d,...,%d,%s) "
3240 fd
, len
, pulongest (offset
),
3241 ret
, ret
!= -1 ? 0 : *target_errno
);
3246 *target_errno
= FILEIO_ENOSYS
;
3250 /* Read up to LEN bytes FD on the target into READ_BUF.
3251 Return the number of bytes read, or -1 if an error occurs
3252 (and set *TARGET_ERRNO). */
3254 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3255 ULONGEST offset
, int *target_errno
)
3257 struct target_ops
*t
;
3259 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3261 if (t
->to_fileio_pread
!= NULL
)
3263 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3267 fprintf_unfiltered (gdb_stdlog
,
3268 "target_fileio_pread (%d,...,%d,%s) "
3270 fd
, len
, pulongest (offset
),
3271 ret
, ret
!= -1 ? 0 : *target_errno
);
3276 *target_errno
= FILEIO_ENOSYS
;
3280 /* Close FD on the target. Return 0, or -1 if an error occurs
3281 (and set *TARGET_ERRNO). */
3283 target_fileio_close (int fd
, int *target_errno
)
3285 struct target_ops
*t
;
3287 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3289 if (t
->to_fileio_close
!= NULL
)
3291 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3294 fprintf_unfiltered (gdb_stdlog
,
3295 "target_fileio_close (%d) = %d (%d)\n",
3296 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3301 *target_errno
= FILEIO_ENOSYS
;
3305 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3306 occurs (and set *TARGET_ERRNO). */
3308 target_fileio_unlink (const char *filename
, int *target_errno
)
3310 struct target_ops
*t
;
3312 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3314 if (t
->to_fileio_unlink
!= NULL
)
3316 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3319 fprintf_unfiltered (gdb_stdlog
,
3320 "target_fileio_unlink (%s) = %d (%d)\n",
3321 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3326 *target_errno
= FILEIO_ENOSYS
;
3330 /* Read value of symbolic link FILENAME on the target. Return a
3331 null-terminated string allocated via xmalloc, or NULL if an error
3332 occurs (and set *TARGET_ERRNO). */
3334 target_fileio_readlink (const char *filename
, int *target_errno
)
3336 struct target_ops
*t
;
3338 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3340 if (t
->to_fileio_readlink
!= NULL
)
3342 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3345 fprintf_unfiltered (gdb_stdlog
,
3346 "target_fileio_readlink (%s) = %s (%d)\n",
3347 filename
, ret
? ret
: "(nil)",
3348 ret
? 0 : *target_errno
);
3353 *target_errno
= FILEIO_ENOSYS
;
3358 target_fileio_close_cleanup (void *opaque
)
3360 int fd
= *(int *) opaque
;
3363 target_fileio_close (fd
, &target_errno
);
3366 /* Read target file FILENAME. Store the result in *BUF_P and
3367 return the size of the transferred data. PADDING additional bytes are
3368 available in *BUF_P. This is a helper function for
3369 target_fileio_read_alloc; see the declaration of that function for more
3373 target_fileio_read_alloc_1 (const char *filename
,
3374 gdb_byte
**buf_p
, int padding
)
3376 struct cleanup
*close_cleanup
;
3377 size_t buf_alloc
, buf_pos
;
3383 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3387 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3389 /* Start by reading up to 4K at a time. The target will throttle
3390 this number down if necessary. */
3392 buf
= xmalloc (buf_alloc
);
3396 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3397 buf_alloc
- buf_pos
- padding
, buf_pos
,
3401 /* An error occurred. */
3402 do_cleanups (close_cleanup
);
3408 /* Read all there was. */
3409 do_cleanups (close_cleanup
);
3419 /* If the buffer is filling up, expand it. */
3420 if (buf_alloc
< buf_pos
* 2)
3423 buf
= xrealloc (buf
, buf_alloc
);
3430 /* Read target file FILENAME. Store the result in *BUF_P and return
3431 the size of the transferred data. See the declaration in "target.h"
3432 function for more information about the return value. */
3435 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3437 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3440 /* Read target file FILENAME. The result is NUL-terminated and
3441 returned as a string, allocated using xmalloc. If an error occurs
3442 or the transfer is unsupported, NULL is returned. Empty objects
3443 are returned as allocated but empty strings. A warning is issued
3444 if the result contains any embedded NUL bytes. */
3447 target_fileio_read_stralloc (const char *filename
)
3451 LONGEST i
, transferred
;
3453 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3454 bufstr
= (char *) buffer
;
3456 if (transferred
< 0)
3459 if (transferred
== 0)
3460 return xstrdup ("");
3462 bufstr
[transferred
] = 0;
3464 /* Check for embedded NUL bytes; but allow trailing NULs. */
3465 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3468 warning (_("target file %s "
3469 "contained unexpected null characters"),
3479 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3480 CORE_ADDR addr
, int len
)
3482 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3486 default_watchpoint_addr_within_range (struct target_ops
*target
,
3488 CORE_ADDR start
, int length
)
3490 return addr
>= start
&& addr
< start
+ length
;
3493 static struct gdbarch
*
3494 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3496 return target_gdbarch ();
3512 * Find the next target down the stack from the specified target.
3516 find_target_beneath (struct target_ops
*t
)
3524 find_target_at (enum strata stratum
)
3526 struct target_ops
*t
;
3528 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3529 if (t
->to_stratum
== stratum
)
3536 /* The inferior process has died. Long live the inferior! */
3539 generic_mourn_inferior (void)
3543 ptid
= inferior_ptid
;
3544 inferior_ptid
= null_ptid
;
3546 /* Mark breakpoints uninserted in case something tries to delete a
3547 breakpoint while we delete the inferior's threads (which would
3548 fail, since the inferior is long gone). */
3549 mark_breakpoints_out ();
3551 if (!ptid_equal (ptid
, null_ptid
))
3553 int pid
= ptid_get_pid (ptid
);
3554 exit_inferior (pid
);
3557 /* Note this wipes step-resume breakpoints, so needs to be done
3558 after exit_inferior, which ends up referencing the step-resume
3559 breakpoints through clear_thread_inferior_resources. */
3560 breakpoint_init_inferior (inf_exited
);
3562 registers_changed ();
3564 reopen_exec_file ();
3565 reinit_frame_cache ();
3567 if (deprecated_detach_hook
)
3568 deprecated_detach_hook ();
3571 /* Convert a normal process ID to a string. Returns the string in a
3575 normal_pid_to_str (ptid_t ptid
)
3577 static char buf
[32];
3579 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3584 dummy_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3586 return normal_pid_to_str (ptid
);
3589 /* Error-catcher for target_find_memory_regions. */
3591 dummy_find_memory_regions (struct target_ops
*self
,
3592 find_memory_region_ftype ignore1
, void *ignore2
)
3594 error (_("Command not implemented for this target."));
3598 /* Error-catcher for target_make_corefile_notes. */
3600 dummy_make_corefile_notes (struct target_ops
*self
,
3601 bfd
*ignore1
, int *ignore2
)
3603 error (_("Command not implemented for this target."));
3607 /* Set up the handful of non-empty slots needed by the dummy target
3611 init_dummy_target (void)
3613 dummy_target
.to_shortname
= "None";
3614 dummy_target
.to_longname
= "None";
3615 dummy_target
.to_doc
= "";
3616 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3617 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3618 dummy_target
.to_supports_disable_randomization
3619 = find_default_supports_disable_randomization
;
3620 dummy_target
.to_pid_to_str
= dummy_pid_to_str
;
3621 dummy_target
.to_stratum
= dummy_stratum
;
3622 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3623 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3624 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3625 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3626 dummy_target
.to_has_execution
3627 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3628 dummy_target
.to_magic
= OPS_MAGIC
;
3630 install_dummy_methods (&dummy_target
);
3634 debug_to_open (char *args
, int from_tty
)
3636 debug_target
.to_open (args
, from_tty
);
3638 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3642 target_close (struct target_ops
*targ
)
3644 gdb_assert (!target_is_pushed (targ
));
3646 if (targ
->to_xclose
!= NULL
)
3647 targ
->to_xclose (targ
);
3648 else if (targ
->to_close
!= NULL
)
3649 targ
->to_close (targ
);
3652 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3656 target_attach (char *args
, int from_tty
)
3658 current_target
.to_attach (¤t_target
, args
, from_tty
);
3660 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3665 target_thread_alive (ptid_t ptid
)
3667 struct target_ops
*t
;
3669 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3671 if (t
->to_thread_alive
!= NULL
)
3675 retval
= t
->to_thread_alive (t
, ptid
);
3677 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3678 ptid_get_pid (ptid
), retval
);
3688 target_find_new_threads (void)
3690 struct target_ops
*t
;
3692 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3694 if (t
->to_find_new_threads
!= NULL
)
3696 t
->to_find_new_threads (t
);
3698 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3706 target_stop (ptid_t ptid
)
3710 warning (_("May not interrupt or stop the target, ignoring attempt"));
3714 (*current_target
.to_stop
) (¤t_target
, ptid
);
3718 debug_to_post_attach (struct target_ops
*self
, int pid
)
3720 debug_target
.to_post_attach (&debug_target
, pid
);
3722 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3725 /* Concatenate ELEM to LIST, a comma separate list, and return the
3726 result. The LIST incoming argument is released. */
3729 str_comma_list_concat_elem (char *list
, const char *elem
)
3732 return xstrdup (elem
);
3734 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
3737 /* Helper for target_options_to_string. If OPT is present in
3738 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3739 Returns the new resulting string. OPT is removed from
3743 do_option (int *target_options
, char *ret
,
3744 int opt
, char *opt_str
)
3746 if ((*target_options
& opt
) != 0)
3748 ret
= str_comma_list_concat_elem (ret
, opt_str
);
3749 *target_options
&= ~opt
;
3756 target_options_to_string (int target_options
)
3760 #define DO_TARG_OPTION(OPT) \
3761 ret = do_option (&target_options, ret, OPT, #OPT)
3763 DO_TARG_OPTION (TARGET_WNOHANG
);
3765 if (target_options
!= 0)
3766 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3774 debug_print_register (const char * func
,
3775 struct regcache
*regcache
, int regno
)
3777 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3779 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3780 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3781 && gdbarch_register_name (gdbarch
, regno
) != NULL
3782 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3783 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3784 gdbarch_register_name (gdbarch
, regno
));
3786 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3787 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3789 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3790 int i
, size
= register_size (gdbarch
, regno
);
3791 gdb_byte buf
[MAX_REGISTER_SIZE
];
3793 regcache_raw_collect (regcache
, regno
, buf
);
3794 fprintf_unfiltered (gdb_stdlog
, " = ");
3795 for (i
= 0; i
< size
; i
++)
3797 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3799 if (size
<= sizeof (LONGEST
))
3801 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3803 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3804 core_addr_to_string_nz (val
), plongest (val
));
3807 fprintf_unfiltered (gdb_stdlog
, "\n");
3811 target_fetch_registers (struct regcache
*regcache
, int regno
)
3813 struct target_ops
*t
;
3815 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3817 if (t
->to_fetch_registers
!= NULL
)
3819 t
->to_fetch_registers (t
, regcache
, regno
);
3821 debug_print_register ("target_fetch_registers", regcache
, regno
);
3828 target_store_registers (struct regcache
*regcache
, int regno
)
3830 struct target_ops
*t
;
3832 if (!may_write_registers
)
3833 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3835 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3838 debug_print_register ("target_store_registers", regcache
, regno
);
3843 target_core_of_thread (ptid_t ptid
)
3845 struct target_ops
*t
;
3847 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3849 if (t
->to_core_of_thread
!= NULL
)
3851 int retval
= t
->to_core_of_thread (t
, ptid
);
3854 fprintf_unfiltered (gdb_stdlog
,
3855 "target_core_of_thread (%d) = %d\n",
3856 ptid_get_pid (ptid
), retval
);
3865 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3867 struct target_ops
*t
;
3869 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3871 if (t
->to_verify_memory
!= NULL
)
3873 int retval
= t
->to_verify_memory (t
, data
, memaddr
, size
);
3876 fprintf_unfiltered (gdb_stdlog
,
3877 "target_verify_memory (%s, %s) = %d\n",
3878 paddress (target_gdbarch (), memaddr
),
3888 /* The documentation for this function is in its prototype declaration in
3892 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3894 struct target_ops
*t
;
3896 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3897 if (t
->to_insert_mask_watchpoint
!= NULL
)
3901 ret
= t
->to_insert_mask_watchpoint (t
, addr
, mask
, rw
);
3904 fprintf_unfiltered (gdb_stdlog
, "\
3905 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3906 core_addr_to_string (addr
),
3907 core_addr_to_string (mask
), rw
, ret
);
3915 /* The documentation for this function is in its prototype declaration in
3919 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3921 struct target_ops
*t
;
3923 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3924 if (t
->to_remove_mask_watchpoint
!= NULL
)
3928 ret
= t
->to_remove_mask_watchpoint (t
, addr
, mask
, rw
);
3931 fprintf_unfiltered (gdb_stdlog
, "\
3932 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3933 core_addr_to_string (addr
),
3934 core_addr_to_string (mask
), rw
, ret
);
3942 /* The documentation for this function is in its prototype declaration
3946 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3948 struct target_ops
*t
;
3950 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3951 if (t
->to_masked_watch_num_registers
!= NULL
)
3952 return t
->to_masked_watch_num_registers (t
, addr
, mask
);
3957 /* The documentation for this function is in its prototype declaration
3961 target_ranged_break_num_registers (void)
3963 struct target_ops
*t
;
3965 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3966 if (t
->to_ranged_break_num_registers
!= NULL
)
3967 return t
->to_ranged_break_num_registers (t
);
3974 struct btrace_target_info
*
3975 target_enable_btrace (ptid_t ptid
)
3977 struct target_ops
*t
;
3979 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3980 if (t
->to_enable_btrace
!= NULL
)
3981 return t
->to_enable_btrace (t
, ptid
);
3990 target_disable_btrace (struct btrace_target_info
*btinfo
)
3992 struct target_ops
*t
;
3994 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3995 if (t
->to_disable_btrace
!= NULL
)
3997 t
->to_disable_btrace (t
, btinfo
);
4007 target_teardown_btrace (struct btrace_target_info
*btinfo
)
4009 struct target_ops
*t
;
4011 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4012 if (t
->to_teardown_btrace
!= NULL
)
4014 t
->to_teardown_btrace (t
, btinfo
);
4024 target_read_btrace (VEC (btrace_block_s
) **btrace
,
4025 struct btrace_target_info
*btinfo
,
4026 enum btrace_read_type type
)
4028 struct target_ops
*t
;
4030 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4031 if (t
->to_read_btrace
!= NULL
)
4032 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
4035 return BTRACE_ERR_NOT_SUPPORTED
;
4041 target_stop_recording (void)
4043 struct target_ops
*t
;
4045 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4046 if (t
->to_stop_recording
!= NULL
)
4048 t
->to_stop_recording (t
);
4052 /* This is optional. */
4058 target_info_record (void)
4060 struct target_ops
*t
;
4062 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4063 if (t
->to_info_record
!= NULL
)
4065 t
->to_info_record (t
);
4075 target_save_record (const char *filename
)
4077 struct target_ops
*t
;
4079 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4080 if (t
->to_save_record
!= NULL
)
4082 t
->to_save_record (t
, filename
);
4092 target_supports_delete_record (void)
4094 struct target_ops
*t
;
4096 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4097 if (t
->to_delete_record
!= NULL
)
4106 target_delete_record (void)
4108 struct target_ops
*t
;
4110 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4111 if (t
->to_delete_record
!= NULL
)
4113 t
->to_delete_record (t
);
4123 target_record_is_replaying (void)
4125 struct target_ops
*t
;
4127 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4128 if (t
->to_record_is_replaying
!= NULL
)
4129 return t
->to_record_is_replaying (t
);
4137 target_goto_record_begin (void)
4139 struct target_ops
*t
;
4141 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4142 if (t
->to_goto_record_begin
!= NULL
)
4144 t
->to_goto_record_begin (t
);
4154 target_goto_record_end (void)
4156 struct target_ops
*t
;
4158 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4159 if (t
->to_goto_record_end
!= NULL
)
4161 t
->to_goto_record_end (t
);
4171 target_goto_record (ULONGEST insn
)
4173 struct target_ops
*t
;
4175 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4176 if (t
->to_goto_record
!= NULL
)
4178 t
->to_goto_record (t
, insn
);
4188 target_insn_history (int size
, int flags
)
4190 struct target_ops
*t
;
4192 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4193 if (t
->to_insn_history
!= NULL
)
4195 t
->to_insn_history (t
, size
, flags
);
4205 target_insn_history_from (ULONGEST from
, int size
, int flags
)
4207 struct target_ops
*t
;
4209 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4210 if (t
->to_insn_history_from
!= NULL
)
4212 t
->to_insn_history_from (t
, from
, size
, flags
);
4222 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4224 struct target_ops
*t
;
4226 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4227 if (t
->to_insn_history_range
!= NULL
)
4229 t
->to_insn_history_range (t
, begin
, end
, flags
);
4239 target_call_history (int size
, int flags
)
4241 struct target_ops
*t
;
4243 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4244 if (t
->to_call_history
!= NULL
)
4246 t
->to_call_history (t
, size
, flags
);
4256 target_call_history_from (ULONGEST begin
, int size
, int flags
)
4258 struct target_ops
*t
;
4260 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4261 if (t
->to_call_history_from
!= NULL
)
4263 t
->to_call_history_from (t
, begin
, size
, flags
);
4273 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
4275 struct target_ops
*t
;
4277 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4278 if (t
->to_call_history_range
!= NULL
)
4280 t
->to_call_history_range (t
, begin
, end
, flags
);
4288 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
4290 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
4292 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4297 const struct frame_unwind
*
4298 target_get_unwinder (void)
4300 struct target_ops
*t
;
4302 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4303 if (t
->to_get_unwinder
!= NULL
)
4304 return t
->to_get_unwinder
;
4311 const struct frame_unwind
*
4312 target_get_tailcall_unwinder (void)
4314 struct target_ops
*t
;
4316 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4317 if (t
->to_get_tailcall_unwinder
!= NULL
)
4318 return t
->to_get_tailcall_unwinder
;
4326 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4327 struct gdbarch
*gdbarch
)
4329 for (; ops
!= NULL
; ops
= ops
->beneath
)
4330 if (ops
->to_decr_pc_after_break
!= NULL
)
4331 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4333 return gdbarch_decr_pc_after_break (gdbarch
);
4339 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4341 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4345 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4346 int write
, struct mem_attrib
*attrib
,
4347 struct target_ops
*target
)
4351 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4354 fprintf_unfiltered (gdb_stdlog
,
4355 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4356 paddress (target_gdbarch (), memaddr
), len
,
4357 write
? "write" : "read", retval
);
4363 fputs_unfiltered (", bytes =", gdb_stdlog
);
4364 for (i
= 0; i
< retval
; i
++)
4366 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4368 if (targetdebug
< 2 && i
> 0)
4370 fprintf_unfiltered (gdb_stdlog
, " ...");
4373 fprintf_unfiltered (gdb_stdlog
, "\n");
4376 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4380 fputc_unfiltered ('\n', gdb_stdlog
);
4386 debug_to_files_info (struct target_ops
*target
)
4388 debug_target
.to_files_info (target
);
4390 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4394 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4395 struct bp_target_info
*bp_tgt
)
4399 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4401 fprintf_unfiltered (gdb_stdlog
,
4402 "target_insert_breakpoint (%s, xxx) = %ld\n",
4403 core_addr_to_string (bp_tgt
->placed_address
),
4404 (unsigned long) retval
);
4409 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4410 struct bp_target_info
*bp_tgt
)
4414 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4416 fprintf_unfiltered (gdb_stdlog
,
4417 "target_remove_breakpoint (%s, xxx) = %ld\n",
4418 core_addr_to_string (bp_tgt
->placed_address
),
4419 (unsigned long) retval
);
4424 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4425 int type
, int cnt
, int from_tty
)
4429 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4430 type
, cnt
, from_tty
);
4432 fprintf_unfiltered (gdb_stdlog
,
4433 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4434 (unsigned long) type
,
4435 (unsigned long) cnt
,
4436 (unsigned long) from_tty
,
4437 (unsigned long) retval
);
4442 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4443 CORE_ADDR addr
, int len
)
4447 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4450 fprintf_unfiltered (gdb_stdlog
,
4451 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4452 core_addr_to_string (addr
), (unsigned long) len
,
4453 core_addr_to_string (retval
));
4458 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4459 CORE_ADDR addr
, int len
, int rw
,
4460 struct expression
*cond
)
4464 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4468 fprintf_unfiltered (gdb_stdlog
,
4469 "target_can_accel_watchpoint_condition "
4470 "(%s, %d, %d, %s) = %ld\n",
4471 core_addr_to_string (addr
), len
, rw
,
4472 host_address_to_string (cond
), (unsigned long) retval
);
4477 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4481 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4483 fprintf_unfiltered (gdb_stdlog
,
4484 "target_stopped_by_watchpoint () = %ld\n",
4485 (unsigned long) retval
);
4490 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4494 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4496 fprintf_unfiltered (gdb_stdlog
,
4497 "target_stopped_data_address ([%s]) = %ld\n",
4498 core_addr_to_string (*addr
),
4499 (unsigned long)retval
);
4504 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4506 CORE_ADDR start
, int length
)
4510 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4513 fprintf_filtered (gdb_stdlog
,
4514 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4515 core_addr_to_string (addr
), core_addr_to_string (start
),
4521 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4522 struct gdbarch
*gdbarch
,
4523 struct bp_target_info
*bp_tgt
)
4527 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4530 fprintf_unfiltered (gdb_stdlog
,
4531 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4532 core_addr_to_string (bp_tgt
->placed_address
),
4533 (unsigned long) retval
);
4538 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4539 struct gdbarch
*gdbarch
,
4540 struct bp_target_info
*bp_tgt
)
4544 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4547 fprintf_unfiltered (gdb_stdlog
,
4548 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4549 core_addr_to_string (bp_tgt
->placed_address
),
4550 (unsigned long) retval
);
4555 debug_to_insert_watchpoint (struct target_ops
*self
,
4556 CORE_ADDR addr
, int len
, int type
,
4557 struct expression
*cond
)
4561 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4562 addr
, len
, type
, cond
);
4564 fprintf_unfiltered (gdb_stdlog
,
4565 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4566 core_addr_to_string (addr
), len
, type
,
4567 host_address_to_string (cond
), (unsigned long) retval
);
4572 debug_to_remove_watchpoint (struct target_ops
*self
,
4573 CORE_ADDR addr
, int len
, int type
,
4574 struct expression
*cond
)
4578 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4579 addr
, len
, type
, cond
);
4581 fprintf_unfiltered (gdb_stdlog
,
4582 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4583 core_addr_to_string (addr
), len
, type
,
4584 host_address_to_string (cond
), (unsigned long) retval
);
4589 debug_to_terminal_init (struct target_ops
*self
)
4591 debug_target
.to_terminal_init (&debug_target
);
4593 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4597 debug_to_terminal_inferior (struct target_ops
*self
)
4599 debug_target
.to_terminal_inferior (&debug_target
);
4601 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4605 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4607 debug_target
.to_terminal_ours_for_output (&debug_target
);
4609 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4613 debug_to_terminal_ours (struct target_ops
*self
)
4615 debug_target
.to_terminal_ours (&debug_target
);
4617 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4621 debug_to_terminal_save_ours (struct target_ops
*self
)
4623 debug_target
.to_terminal_save_ours (&debug_target
);
4625 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4629 debug_to_terminal_info (struct target_ops
*self
,
4630 const char *arg
, int from_tty
)
4632 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4634 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4639 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4641 debug_target
.to_load (&debug_target
, args
, from_tty
);
4643 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4647 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4649 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4651 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4652 ptid_get_pid (ptid
));
4656 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4660 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4662 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4669 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4673 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4675 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4682 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4686 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4688 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4695 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4699 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4701 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4708 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4712 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4714 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4721 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4725 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4727 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4734 debug_to_has_exited (struct target_ops
*self
,
4735 int pid
, int wait_status
, int *exit_status
)
4739 has_exited
= debug_target
.to_has_exited (&debug_target
,
4740 pid
, wait_status
, exit_status
);
4742 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4743 pid
, wait_status
, *exit_status
, has_exited
);
4749 debug_to_can_run (struct target_ops
*self
)
4753 retval
= debug_target
.to_can_run (&debug_target
);
4755 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4760 static struct gdbarch
*
4761 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4763 struct gdbarch
*retval
;
4765 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4767 fprintf_unfiltered (gdb_stdlog
,
4768 "target_thread_architecture (%s) = %s [%s]\n",
4769 target_pid_to_str (ptid
),
4770 host_address_to_string (retval
),
4771 gdbarch_bfd_arch_info (retval
)->printable_name
);
4776 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4778 debug_target
.to_stop (&debug_target
, ptid
);
4780 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4781 target_pid_to_str (ptid
));
4785 debug_to_rcmd (struct target_ops
*self
, char *command
,
4786 struct ui_file
*outbuf
)
4788 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4789 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4793 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4797 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4799 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4806 setup_target_debug (void)
4808 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4810 current_target
.to_open
= debug_to_open
;
4811 current_target
.to_post_attach
= debug_to_post_attach
;
4812 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4813 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4814 current_target
.to_files_info
= debug_to_files_info
;
4815 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4816 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4817 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4818 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4819 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4820 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4821 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4822 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4823 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4824 current_target
.to_watchpoint_addr_within_range
4825 = debug_to_watchpoint_addr_within_range
;
4826 current_target
.to_region_ok_for_hw_watchpoint
4827 = debug_to_region_ok_for_hw_watchpoint
;
4828 current_target
.to_can_accel_watchpoint_condition
4829 = debug_to_can_accel_watchpoint_condition
;
4830 current_target
.to_terminal_init
= debug_to_terminal_init
;
4831 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4832 current_target
.to_terminal_ours_for_output
4833 = debug_to_terminal_ours_for_output
;
4834 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4835 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4836 current_target
.to_terminal_info
= debug_to_terminal_info
;
4837 current_target
.to_load
= debug_to_load
;
4838 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4839 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4840 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4841 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4842 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4843 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4844 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4845 current_target
.to_has_exited
= debug_to_has_exited
;
4846 current_target
.to_can_run
= debug_to_can_run
;
4847 current_target
.to_stop
= debug_to_stop
;
4848 current_target
.to_rcmd
= debug_to_rcmd
;
4849 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4850 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
4854 static char targ_desc
[] =
4855 "Names of targets and files being debugged.\nShows the entire \
4856 stack of targets currently in use (including the exec-file,\n\
4857 core-file, and process, if any), as well as the symbol file name.";
4860 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4862 error (_("\"monitor\" command not supported by this target."));
4866 do_monitor_command (char *cmd
,
4869 target_rcmd (cmd
, gdb_stdtarg
);
4872 /* Print the name of each layers of our target stack. */
4875 maintenance_print_target_stack (char *cmd
, int from_tty
)
4877 struct target_ops
*t
;
4879 printf_filtered (_("The current target stack is:\n"));
4881 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4883 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;

/* "set target-async" command callback: reject the change while the
   inferior runs, otherwise commit the user-set value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Restore the user-visible copy before erroring out.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}

/* "show target-async" command callback.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4917 /* Temporary copies of permission settings. */
4919 static int may_write_registers_1
= 1;
4920 static int may_write_memory_1
= 1;
4921 static int may_insert_breakpoints_1
= 1;
4922 static int may_insert_tracepoints_1
= 1;
4923 static int may_insert_fast_tracepoints_1
= 1;
4924 static int may_stop_1
= 1;
4926 /* Make the user-set values match the real values again. */
4929 update_target_permissions (void)
4931 may_write_registers_1
= may_write_registers
;
4932 may_write_memory_1
= may_write_memory
;
4933 may_insert_breakpoints_1
= may_insert_breakpoints
;
4934 may_insert_tracepoints_1
= may_insert_tracepoints
;
4935 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4936 may_stop_1
= may_stop
;
4939 /* The one function handles (most of) the permission flags in the same
4943 set_target_permissions (char *args
, int from_tty
,
4944 struct cmd_list_element
*c
)
4946 if (target_has_execution
)
4948 update_target_permissions ();
4949 error (_("Cannot change this setting while the inferior is running."));
4952 /* Make the real values match the user-changed values. */
4953 may_write_registers
= may_write_registers_1
;
4954 may_insert_breakpoints
= may_insert_breakpoints_1
;
4955 may_insert_tracepoints
= may_insert_tracepoints_1
;
4956 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4957 may_stop
= may_stop_1
;
4958 update_observer_mode ();
4961 /* Set memory write permission independently of observer mode. */
4964 set_write_memory_permission (char *args
, int from_tty
,
4965 struct cmd_list_element
*c
)
4967 /* Make the real values match the user-changed values. */
4968 may_write_memory
= may_write_memory_1
;
4969 update_observer_mode ();
4974 initialize_targets (void)
4976 init_dummy_target ();
4977 push_target (&dummy_target
);
4979 add_info ("target", target_info
, targ_desc
);
4980 add_info ("files", target_info
, targ_desc
);
4982 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4983 Set target debugging."), _("\
4984 Show target debugging."), _("\
4985 When non-zero, target debugging is enabled. Higher numbers are more\n\
4986 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4990 &setdebuglist
, &showdebuglist
);
4992 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4993 &trust_readonly
, _("\
4994 Set mode for reading from readonly sections."), _("\
4995 Show mode for reading from readonly sections."), _("\
4996 When this mode is on, memory reads from readonly sections (such as .text)\n\
4997 will be read from the object file instead of from the target. This will\n\
4998 result in significant performance improvement for remote targets."),
5000 show_trust_readonly
,
5001 &setlist
, &showlist
);
5003 add_com ("monitor", class_obscure
, do_monitor_command
,
5004 _("Send a command to the remote monitor (remote targets only)."));
5006 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
5007 _("Print the name of each layer of the internal target stack."),
5008 &maintenanceprintlist
);
5010 add_setshow_boolean_cmd ("target-async", no_class
,
5011 &target_async_permitted_1
, _("\
5012 Set whether gdb controls the inferior in asynchronous mode."), _("\
5013 Show whether gdb controls the inferior in asynchronous mode."), _("\
5014 Tells gdb whether to control the inferior in asynchronous mode."),
5015 set_target_async_command
,
5016 show_target_async_command
,
5020 add_setshow_boolean_cmd ("may-write-registers", class_support
,
5021 &may_write_registers_1
, _("\
5022 Set permission to write into registers."), _("\
5023 Show permission to write into registers."), _("\
5024 When this permission is on, GDB may write into the target's registers.\n\
5025 Otherwise, any sort of write attempt will result in an error."),
5026 set_target_permissions
, NULL
,
5027 &setlist
, &showlist
);
5029 add_setshow_boolean_cmd ("may-write-memory", class_support
,
5030 &may_write_memory_1
, _("\
5031 Set permission to write into target memory."), _("\
5032 Show permission to write into target memory."), _("\
5033 When this permission is on, GDB may write into the target's memory.\n\
5034 Otherwise, any sort of write attempt will result in an error."),
5035 set_write_memory_permission
, NULL
,
5036 &setlist
, &showlist
);
5038 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
5039 &may_insert_breakpoints_1
, _("\
5040 Set permission to insert breakpoints in the target."), _("\
5041 Show permission to insert breakpoints in the target."), _("\
5042 When this permission is on, GDB may insert breakpoints in the program.\n\
5043 Otherwise, any sort of insertion attempt will result in an error."),
5044 set_target_permissions
, NULL
,
5045 &setlist
, &showlist
);
5047 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
5048 &may_insert_tracepoints_1
, _("\
5049 Set permission to insert tracepoints in the target."), _("\
5050 Show permission to insert tracepoints in the target."), _("\
5051 When this permission is on, GDB may insert tracepoints in the program.\n\
5052 Otherwise, any sort of insertion attempt will result in an error."),
5053 set_target_permissions
, NULL
,
5054 &setlist
, &showlist
);
5056 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
5057 &may_insert_fast_tracepoints_1
, _("\
5058 Set permission to insert fast tracepoints in the target."), _("\
5059 Show permission to insert fast tracepoints in the target."), _("\
5060 When this permission is on, GDB may insert fast tracepoints.\n\
5061 Otherwise, any sort of insertion attempt will result in an error."),
5062 set_target_permissions
, NULL
,
5063 &setlist
, &showlist
);
5065 add_setshow_boolean_cmd ("may-interrupt", class_support
,
5067 Set permission to interrupt or signal the target."), _("\
5068 Show permission to interrupt or signal the target."), _("\
5069 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5070 Otherwise, any attempt to interrupt or stop will be ignored."),
5071 set_target_permissions
, NULL
,
5072 &setlist
, &showlist
);