/* Select target systems and architectures at runtime for GDB.

   Copyright (C) 1990-2014 Free Software Foundation, Inc.

   Contributed by Cygnus Support.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "target-dcache.h"
#include "gdb_assert.h"
#include "exceptions.h"
#include "target-descriptions.h"
#include "gdbthread.h"
#include "inline-frame.h"
#include "tracepoint.h"
#include "gdb/fileio.h"
static void target_info (char *, int);

static void generic_tls_error (void) ATTRIBUTE_NORETURN;

static void default_terminal_info (struct target_ops *, const char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
                                                 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (struct target_ops *,
                                                CORE_ADDR, int);

static void default_rcmd (struct target_ops *, const char *, struct ui_file *);

static ptid_t default_get_ada_task_ptid (struct target_ops *self,
                                         long lwp, long tid);

static int default_follow_fork (struct target_ops *self, int follow_child,
                                int detach_fork);

static void default_mourn_inferior (struct target_ops *self);

static int default_search_memory (struct target_ops *ops,
                                  CORE_ADDR start_addr,
                                  ULONGEST search_space_len,
                                  const gdb_byte *pattern,
                                  ULONGEST pattern_len,
                                  CORE_ADDR *found_addrp);

static int default_verify_memory (struct target_ops *self,
                                  const gdb_byte *data,
                                  CORE_ADDR memaddr, ULONGEST size);

static struct address_space *default_thread_address_space
     (struct target_ops *self, ptid_t ptid);

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int return_zero (struct target_ops *);

static int return_zero_has_execution (struct target_ops *, ptid_t);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
                                                    ptid_t ptid);

static int dummy_find_memory_regions (struct target_ops *self,
                                      find_memory_region_ftype ignore1,
                                      void *ignore2);

static char *dummy_make_corefile_notes (struct target_ops *self,
                                        bfd *ignore1, int *ignore2);

static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);

static enum exec_direction_kind default_execution_direction
    (struct target_ops *self);

static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
                                                     struct gdbarch *gdbarch);
#include "target-delegates.c"

static void init_dummy_target (void);

static struct target_ops debug_target;
static void debug_to_open (char *, int);

static void debug_to_prepare_to_store (struct target_ops *self,
                                       struct regcache *);

static void debug_to_files_info (struct target_ops *);

static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
                                       struct bp_target_info *);

static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
                                       struct bp_target_info *);

static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
                                           int, int, int);

static int debug_to_insert_hw_breakpoint (struct target_ops *self,
                                          struct gdbarch *,
                                          struct bp_target_info *);

static int debug_to_remove_hw_breakpoint (struct target_ops *self,
                                          struct gdbarch *,
                                          struct bp_target_info *);

static int debug_to_insert_watchpoint (struct target_ops *self,
                                       CORE_ADDR, int, int,
                                       struct expression *);

static int debug_to_remove_watchpoint (struct target_ops *self,
                                       CORE_ADDR, int, int,
                                       struct expression *);

static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);

static int debug_to_watchpoint_addr_within_range (struct target_ops *,
                                                  CORE_ADDR, CORE_ADDR, int);

static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
                                                 CORE_ADDR, int);

static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
                                                    CORE_ADDR, int, int,
                                                    struct expression *);

static void debug_to_terminal_init (struct target_ops *self);

static void debug_to_terminal_inferior (struct target_ops *self);

static void debug_to_terminal_ours_for_output (struct target_ops *self);

static void debug_to_terminal_save_ours (struct target_ops *self);

static void debug_to_terminal_ours (struct target_ops *self);

static void debug_to_load (struct target_ops *self, const char *, int);

static int debug_to_can_run (struct target_ops *self);

static void debug_to_stop (struct target_ops *self, ptid_t);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoints inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;
/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;

static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}

static void setup_target_debug (void);
/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
                  gdb_stdout);
}
/* Default target_has_* methods for process_stratum targets.  */

int
default_child_has_all_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_memory (struct target_ops *ops)
{
  /* If no inferior selected, then we can't read memory here.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_stack (struct target_ops *ops)
{
  /* If no inferior selected, there's no stack.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_registers (struct target_ops *ops)
{
  /* Can't read registers from no inferior.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    return 0;

  return 1;
}

int
default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
{
  /* If there's no thread selected, then we can't make it run through
     hoops.  */
  if (ptid_equal (the_ptid, null_ptid))
    return 0;

  return 1;
}
int
target_has_all_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_all_memory (t))
      return 1;

  return 0;
}

int
target_has_memory_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_memory (t))
      return 1;

  return 0;
}

int
target_has_stack_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_stack (t))
      return 1;

  return 0;
}

int
target_has_registers_1 (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_registers (t))
      return 1;

  return 0;
}

int
target_has_execution_1 (ptid_t the_ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_has_execution (t, the_ptid))
      return 1;

  return 0;
}

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

static void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = return_zero_has_execution;

  /* These methods can be called on an unpushed target and so require
     a default implementation if the target might plausibly be the
     default run target.  */
  gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL
                                        && t->to_supports_non_stop != NULL));

  install_delegators (t);
}
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
                           completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
        (target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
        xrealloc ((char *) target_structs,
                  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
               &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
/* Add a possible target architecture to the list.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd here, we do not get the deprecated warning,
     so add the command directly and deprecate it afterwards.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
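
/* Illustrative sketch (not part of GDB): a hypothetical target would
   normally be registered from its _initialize_* routine roughly as
   follows.  The "example" names below are made up for illustration
   only; real targets fill in many more methods.  */
#if 0
static struct target_ops example_ops;

static void
example_open (char *args, int from_tty)
{
  /* ... connect to the target ..., then make it the current target.  */
  push_target (&example_ops);
}

void
_initialize_example_target (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Example illustrative target";
  example_ops.to_doc = "Connect to the (hypothetical) example target.";
  example_ops.to_open = example_open;
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;
  add_target (&example_ops);
}
#endif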
void
target_kill (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

  current_target.to_kill (&current_target);
}
void
target_load (const char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_target.to_shortname);
}

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse)
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  current_target.to_stratum = target_stack->to_stratum;

#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
        current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
    }
#undef INHERIT

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
        break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
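
/* Illustrative sketch (not part of GDB): push_target keeps the stack
   ordered by stratum, e.g. a remote process_stratum target above the
   exec file_stratum target above the dummy target.  Walking target_stack
   through the "beneath" links visits targets from highest stratum down,
   which is how requests fall through to lower targets.  */
#if 0
static void
example_dump_target_stack (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    printf_unfiltered ("%d: %s\n", (int) t->to_stratum, t->to_shortname);
}
#endif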
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
                    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
        break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
        {
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
/* Return 1 if T is now pushed in the target stack.  Return 0 otherwise.  */

int
target_is_pushed (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    if (*cur == t)
      return 1;

  return 0;
}
/* Default implementation of to_get_thread_local_address.  */

static void
generic_tls_error (void)
{
  throw_error (TLS_GENERIC_ERROR,
               _("Cannot find thread-local variables on this target"));
}
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */

CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = &current_target;

  if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile.  */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
const char *
target_xfer_status_to_string (enum target_xfer_status status)
{
#define CASE(X) case X: return #X
  switch (status)
    {
      CASE(TARGET_XFER_E_IO);
      CASE(TARGET_XFER_UNAVAILABLE);
    default:
      return "<unknown>";
    }
#undef CASE
}

#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
        {
          /* The transfer request might have crossed the boundary to an
             unallocated region of memory.  Retry the transfer, requesting
             a single byte.  */
          tlen = 1;
          offset = 0;
          errcode = target_read_memory (memaddr, buf, 1);
          if (errcode != 0)
            goto done;
        }

      if (bufptr - buffer + tlen > buffer_allocated)
        {
          unsigned int bytes;

          bytes = bufptr - buffer;
          buffer_allocated *= 2;
          buffer = xrealloc (buffer, buffer_allocated);
          bufptr = buffer + bytes;
        }

      for (i = 0; i < tlen; i++)
        {
          *bufptr++ = buf[i + offset];
          if (buf[i + offset] == '\000')
            {
              nbytes_read += i + 1;
              goto done;
            }
        }

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
*
922 target_get_section_table (struct target_ops
*target
)
925 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
927 return (*target
->to_get_section_table
) (target
);
930 /* Find a section containing ADDR. */
932 struct target_section
*
933 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
935 struct target_section_table
*table
= target_get_section_table (target
);
936 struct target_section
*secp
;
941 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
943 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

static enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
                         const gdb_byte *writebuf, ULONGEST memaddr,
                         LONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                  readbuf, writebuf, memaddr, len,
                                  xfered_len);
      if (res == TARGET_XFER_OK)
        break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
        break;

      /* We want to continue past core files to executables, but not
         past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
        break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && !ptid_equal (inferior_ptid, null_ptid)
      && target_dcache_init_p ()
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get ();

      /* Note that writing to an area of memory which wasn't present
         in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  ULONGEST reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
        {
          struct target_section_table *table
            = target_get_section_table (ops);
          const char *section_name = section->the_bfd_section->name;

          memaddr = overlay_mapped_address (memaddr, section);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    section_name);
        }
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
          && (bfd_get_section_flags (secp->the_bfd_section->owner,
                                     secp->the_bfd_section)
              & SEC_READONLY))
        {
          table = target_get_section_table (ops);
          return section_table_xfer_memory_partial (readbuf, writebuf,
                                                    memaddr, len, xfered_len,
                                                    table->sections,
                                                    table->sections_end,
                                                    NULL);
        }
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
        return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
        error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
          || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
          || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
                                         reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
                                 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
/* Perform a partial memory transfer.  For docs see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
                                   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
        breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      void *buf;
      struct cleanup *old_chain;

      /* A large write request is likely to be partially satisfied
         by memory_xfer_partial_1.  We will continually malloc
         and free a copy of the entire write request for breakpoint
         shadow handling even though we only end up writing a small
         subset of it.  Cap writes to 4KB to mitigate this.  */
      len = min (4096, len);

      buf = xmalloc (len);
      old_chain = make_cleanup (xfree, buf);
      memcpy (buf, writebuf, len);

      breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
                                   xfered_len);

      do_cleanups (old_chain);
    }

  return res;
}
)
1179 show_memory_breakpoints
= (uintptr_t) arg
;
1183 make_show_memory_breakpoints_cleanup (int show
)
1185 int current
= show_memory_breakpoints
;
1187 show_memory_breakpoints
= show
;
1188 return make_cleanup (restore_show_memory_breakpoints
,
1189 (void *) (uintptr_t) current
);
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
                     enum target_object object, const char *annex,
                     gdb_byte *readbuf, const gdb_byte *writebuf,
                     ULONGEST offset, ULONGEST len,
                     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
           core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
                                  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
                                        xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
                                   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
                          "%s:target_xfer_partial "
                          "(%d, %s, %s, %s, %s, %s) = %d, %s",
                          ops->to_shortname,
                          (int) object,
                          (annex ? annex : "(null)"),
                          host_address_to_string (readbuf),
                          host_address_to_string (writebuf),
                          core_addr_to_string_nz (offset),
                          pulongest (len), retval,
                          pulongest (*xfered_len));

      if (readbuf)
        myaddr = readbuf;
      if (writebuf)
        myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
        {
          int i;

          fputs_unfiltered (", bytes =", gdb_stdlog);
          for (i = 0; i < *xfered_len; i++)
            {
              if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
                {
                  if (targetdebug < 2 && i > 0)
                    {
                      fprintf_unfiltered (gdb_stdlog, " ...");
                      break;
                    }
                  fprintf_unfiltered (gdb_stdlog, "\n");
                }

              fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
            }
        }

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
/* Read LEN bytes of target memory at address MEMADDR, placing the
   results in GDB's memory at MYADDR.  Returns either 0 for success or
   TARGET_XFER_E_IO if any error occurs.

   If an error occurs, no guarantee is made about the contents of the data at
   MYADDR.  In particular, the caller should not depend upon partial reads
   filling the buffer with good data.  There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
   deal with partial reads should call target_read (which will retry until
   it makes no progress, and then return how much was transferred).  */

int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* Dispatch to the topmost target, not the flattened current_target.
     Memory accesses check target->to_has_(all_)memory, and the
     flattened target doesn't inherit those.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}
/* Like target_read_memory, but specify explicitly that this is a read
   from the target's raw memory.  That is, this read bypasses the
   dcache, breakpoint shadowing, etc.  */

int
target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's stack.  This may trigger different cache behavior.  */

int
target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Like target_read_memory, but specify explicitly that this is a read from
   the target's code.  This may trigger different cache behavior.  */

int
target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
                   myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
   Returns either 0 for success or TARGET_XFER_E_IO if any
   error occurs.  If an error occurs, no guarantee is made about how
   much data got written.  Callers that can deal with partial writes
   should call target_write.  */

int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}

/* Write LEN bytes from MYADDR to target raw memory at address
   MEMADDR.  Returns either 0 for success or TARGET_XFER_E_IO
   if any error occurs.  If an error occurs, no guarantee is made
   about how much data got written.  Callers that can deal with
   partial writes should call target_write.  */

int
target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
                         ssize_t len)
{
  /* See comment in target_read_memory about why the request starts at
     current_target.beneath.  */
  if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
                    myaddr, memaddr, len) == len)
    return 0;
  else
    return TARGET_XFER_E_IO;
}
/* Fetch the target's memory map.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  result = current_target.to_memory_map (&current_target);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
         VEC_length (mem_region_s, result),
         sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
        {
          warning (_("Overlapping regions in memory map: ignoring"));
          VEC_free (mem_region_s, result);
          return NULL;
        }
      last_one = this_one;
    }

  return result;
}
, LONGEST length
)
1436 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1437 hex_string (address
), phex (length
, 0));
1438 current_target
.to_flash_erase (¤t_target
, address
, length
);
1442 target_flash_done (void)
1445 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1446 current_target
.to_flash_done (¤t_target
);
static void
show_trust_readonly (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Mode for reading from readonly sections is %s.\n"),
                    value);
}
1460 static enum target_xfer_status
1461 target_read_partial (struct target_ops
*ops
,
1462 enum target_object object
,
1463 const char *annex
, gdb_byte
*buf
,
1464 ULONGEST offset
, ULONGEST len
,
1465 ULONGEST
*xfered_len
)
1467 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1471 static enum target_xfer_status
1472 target_write_partial (struct target_ops
*ops
,
1473 enum target_object object
,
1474 const char *annex
, const gdb_byte
*buf
,
1475 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1477 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
/* Wrappers to perform the full transfer.  */

/* For docs on target_read see target.h.  */

LONGEST
target_read (struct target_ops *ops,
             enum target_object object,
             const char *annex, gdb_byte *buf,
             ULONGEST offset, LONGEST len)
{
  LONGEST xfered = 0;

  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex,
                                    (gdb_byte *) buf + xfered,
                                    offset + xfered, len - xfered,
                                    &xfered_len);

      /* Call an observer, notifying them of the xfer progress?  */
      if (status == TARGET_XFER_EOF)
        return xfered;
      else if (status == TARGET_XFER_OK)
        {
          xfered += xfered_len;
          QUIT;
        }
      else
        return -1;
    }
  return len;
}
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
                           ULONGEST begin, ULONGEST end,
                           VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                           buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
                                buf + (end - begin) - 1, end - 1, 1,
                                &xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin) / 2;

      if (forward)
        {
          first_half_begin = current_begin;
          first_half_end = middle;
          second_half_begin = middle;
          second_half_end = current_end;
        }
      else
        {
          first_half_begin = middle;
          first_half_end = current_end;
          second_half_begin = current_begin;
          second_half_end = middle;
        }

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                          buf + (first_half_begin - begin),
                          first_half_begin,
                          first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
        {
          /* This half reads up fine.  So, the error must be in the
             other half.  */
          current_begin = second_half_begin;
          current_end = second_half_end;
        }
      else
        {
          /* This half is not readable.  Because we've tried one byte, we
             know some part of this half is actually readable.  Go to the
             next iteration to divide again and try to read.

             We don't handle the other half, because this function only tries
             to read a single readable subrange.  */
          current_begin = first_half_begin;
          current_end = first_half_end;
        }
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
void
free_memory_read_result_vector (void *x)
{
  VEC(memory_read_result_s) *v = x;
  memory_read_result_s *current;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
    {
      xfree (current->data);
    }
  VEC_free (memory_read_result_s, v);
}
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
        rlen = len - xfered;
      else
        rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
        {
          /* Cannot read this region.  Note that we can end up here only
             if the region is explicitly marked inaccessible, or
             'inaccessible-by-default' is in effect.  */
          xfered += rlen;
        }
      else
        {
          LONGEST to_read = min (len - xfered, rlen);
          gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);

          LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                                      (gdb_byte *) buffer,
                                      offset + xfered, to_read);
          /* Call an observer, notifying them of the xfer progress?  */
          if (xfer <= 0)
            {
              /* Got an error reading full chunk.  See if maybe we can read
                 some subrange.  */
              xfree (buffer);
              read_whatever_is_readable (ops, offset + xfered,
                                         offset + xfered + to_read, &result);
              xfered += to_read;
            }
          else
            {
              struct memory_read_result r;

              r.data = buffer;
              r.begin = offset + xfered;
              r.end = r.begin + xfer;
              VEC_safe_push (memory_read_result_s, result, &r);
              xfered += xfer;
            }
          QUIT;
        }
    }
  return result;
}
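
/* Illustrative sketch (not part of GDB): consuming the vector returned
   by read_memory_robust.  Each element describes one readable block;
   the blocks and the vector itself are released through
   free_memory_read_result_vector.  ADDR and LEN are hypothetical.  */
#if 0
static void
example_show_readable_ranges (ULONGEST addr, LONGEST len)
{
  VEC(memory_read_result_s) *available;
  memory_read_result_s *r;
  struct cleanup *cleanup;
  int ix;

  available = read_memory_robust (current_target.beneath, addr, len);
  cleanup = make_cleanup (free_memory_read_result_vector, available);

  for (ix = 0; VEC_iterate (memory_read_result_s, available, ix, r); ix++)
    printf_unfiltered ("readable: [%s, %s)\n",
                       hex_string (r->begin), hex_string (r->end));

  do_cleanups (cleanup);
}
#endif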
/* An alternative to target_write with progress callbacks.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
                            enum target_object object,
                            const char *annex, const gdb_byte *buf,
                            ULONGEST offset, LONGEST len,
                            void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered = 0;

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_write_partial (ops, object, annex,
                                     (gdb_byte *) buf + xfered,
                                     offset + xfered, len - xfered,
                                     &xfered_len);

      if (status != TARGET_XFER_OK)
        return status == TARGET_XFER_EOF ? xfered : -1;

      if (progress)
        (*progress) (xfered_len, baton);

      xfered += xfered_len;
      QUIT;
    }
  return len;
}
1767 target_write (struct target_ops
*ops
,
1768 enum target_object object
,
1769 const char *annex
, const gdb_byte
*buf
,
1770 ULONGEST offset
, LONGEST len
)
1772 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
                     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT.  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      status = target_read_partial (ops, object, annex, &buf[buf_pos],
                                    buf_pos, buf_alloc - buf_pos - padding,
                                    &xfered_len);

      if (status == TARGET_XFER_EOF)
        {
          /* Read all there was.  */
          if (buf_pos == 0)
            xfree (buf);
          else
            *buf_p = buf;
          return buf_pos;
        }
      else if (status != TARGET_XFER_OK)
        {
          /* An error occurred.  */
          xfree (buf);
          return TARGET_XFER_E_IO;
        }

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
        {
          buf_alloc *= 2;
          buf = xrealloc (buf, buf_alloc);
        }

      QUIT;
    }
}
1840 the size of the transferred data. See the declaration in "target.h"
1841 function for more information about the return value. */
1844 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
1845 const char *annex
, gdb_byte
**buf_p
)
1847 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
/* Read OBJECT/ANNEX using OPS.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
                      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
        warning (_("target object %d, annex %s, "
                   "contained unexpected null characters"),
                 (int) object, annex ? annex : "(none)");
        break;
      }

  return bufstr;
}
/* Memory transfer methods.  */

static void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
                   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
                            int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
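
/* Illustrative sketch (not part of GDB): reading a 4-byte unsigned value
   from an alternate target OPS at a hypothetical address, bypassing
   overlays and the dcache.  */
#if 0
static ULONGEST
example_read_unsigned_word (struct target_ops *ops, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());

  return get_target_memory_unsigned (ops, addr, 4, byte_order);
}
#endif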
int
target_insert_breakpoint (struct gdbarch *gdbarch,
                          struct bp_target_info *bp_tgt)
{
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
                                              gdbarch, bp_tgt);
}

int
target_remove_breakpoint (struct gdbarch *gdbarch,
                          struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
                                              gdbarch, bp_tgt);
}
static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
                       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      if (!(*t->to_has_memory) (t))
        continue;

      if ((int) (t->to_stratum) <= (int) dummy_stratum)
        continue;
      if (has_all_mem)
        printf_unfiltered (_("\tWhile running this, "
                             "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce: attach to a process, detach, then attach to a second
     copy of the program; the second attach fails with:

     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  agent_capability_invalidate ();
}
/* Callback for iterate_over_inferiors.  Gets rid of the given
   inferior.  */

static int
dispose_inferior (struct inferior *inf, void *args)
{
  struct thread_info *thread;

  thread = any_thread_of_process (inf->pid);
  if (thread != NULL)
    {
      switch_to_thread (thread->ptid);

      /* Core inferiors actually should be detached, not killed.  */
      if (target_has_execution)
        target_kill ();
      else
        target_detach (NULL, 0);
    }

  return 0;
}
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (have_inferiors ())
    {
      if (!from_tty
          || !have_live_inferiors ()
          || query (_("A program is being debugged already.  Kill it? ")))
        iterate_over_inferiors (dispose_inferior, NULL);
      else
        error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
/* Detach a target after doing deferred register stores.  */

void
target_detach (const char *args, int from_tty)
{
  struct target_ops* t;

  if (gdbarch_has_global_breakpoints (target_gdbarch ()))
    /* Don't remove global breakpoints here.  They're removed on
       disconnection from the target.  */
    ;
  else
    /* If we're in breakpoints-always-inserted mode, have to remove
       them before detaching.  */
    remove_breakpoints_pid (ptid_get_pid (inferior_ptid));

  prepare_for_detach ();

  current_target.to_detach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
                        args, from_tty);
}
void
target_disconnect (const char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
                        args, from_tty);
  current_target.to_disconnect (&current_target, args, from_tty);
}
ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  struct target_ops *t;
  ptid_t retval = (current_target.to_wait) (&current_target, ptid,
                                            status, options);

  if (targetdebug)
    {
      char *status_string;
      char *options_string;

      status_string = target_waitstatus_to_string (status);
      options_string = target_options_to_string (options);
      fprintf_unfiltered (gdb_stdlog,
                          "target_wait (%d, status, options={%s})"
                          " = %d,   %s\n",
                          ptid_get_pid (ptid), options_string,
                          ptid_get_pid (retval), status_string);
      xfree (status_string);
      xfree (options_string);
    }

  return retval;
}
char *
target_pid_to_str (ptid_t ptid)
{
  return (*current_target.to_pid_to_str) (&current_target, ptid);
}

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
void
target_resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  struct target_ops *t;

  target_dcache_invalidate ();

  current_target.to_resume (&current_target, ptid, step, signal);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
                        ptid_get_pid (ptid),
                        step ? "step" : "continue",
                        gdb_signal_to_name (signal));

  registers_changed_ptid (ptid);
  /* We only set the internal executing state here.  The user/frontend
     running state is set at a higher level.  */
  set_executing (ptid, 1);
  clear_inline_frame_state (ptid);
}
void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  if (targetdebug)
    {
      int i;

      fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
                          numsigs);

      for (i = 0; i < numsigs; i++)
        if (pass_signals[i])
          fprintf_unfiltered (gdb_stdlog, " %s",
                              gdb_signal_to_name (i));

      fprintf_unfiltered (gdb_stdlog, " })\n");
    }

  (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
}
void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  if (targetdebug)
    {
      int i;

      fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
                          numsigs);

      for (i = 0; i < numsigs; i++)
        if (program_signals[i])
          fprintf_unfiltered (gdb_stdlog, " %s",
                              gdb_signal_to_name (i));

      fprintf_unfiltered (gdb_stdlog, " })\n");
    }

  (*current_target.to_program_signals) (&current_target,
                                        numsigs, program_signals);
}
static int
default_follow_fork (struct target_ops *self, int follow_child,
                     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
                  _("could not find a target to follow fork"));
}

/* Look through the list of possible targets for a target that can
   follow forks.  */

int
target_follow_fork (int follow_child, int detach_fork)
{
  int retval = current_target.to_follow_fork (&current_target,
                                              follow_child, detach_fork);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
                        "target_follow_fork (%d, %d) = %d\n",
                        follow_child, detach_fork, retval);
  return retval;
}
static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (__FILE__, __LINE__,
                  _("could not find a target to follow mourn inferior"));
}

void
target_mourn_inferior (void)
{
  current_target.to_mourn_inferior (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->to_read_description (target);
}
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
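
/* For example, with the default SEARCH_CHUNK_SIZE of 16000 and a
   4-byte pattern, search_buf is 16000 + 4 - 1 = 16003 bytes; after
   each chunk is scanned, the trailing pattern_len - 1 = 3 bytes are
   copied to the front of the buffer before the next 16000-byte read,
   so a match that straddles a chunk boundary is still found.  */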
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}

/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  int found;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
			hex_string (start_addr));

  found = current_target.to_search_memory (&current_target, start_addr,
					   search_space_len,
					   pattern, pattern_len, found_addrp);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "  = %d\n", found);

  return found;
}
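
/* A caller typically drives this the way GDB's "find" command does.
   An illustrative sketch only (the real command loop lives in find.c
   and also handles repeat counts and search ranges):

     CORE_ADDR found;

     if (target_search_memory (start, len, pattern, pattern_len, &found) == 1)
       printf_filtered ("pattern found at %s\n",
			paddress (target_gdbarch (), found));
*/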
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}

/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static int auto_connect_native_target = 1;
static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Whether GDB may automatically connect to the "
		      "native target is %s.\n"),
		    value);
}

/* Look through the list of possible targets for a target that can
   execute a run or attach command without any other data.  This is
   used to locate the default process stratum.

   If DO_MESG is not NULL, the result is always valid (error() is
   called for errors); else, return NULL on error.  */

static struct target_ops *
find_default_run_target (char *do_mesg)
{
  struct target_ops *runable = NULL;

  if (auto_connect_native_target)
    {
      struct target_ops **t;
      int count = 0;

      for (t = target_structs; t < target_structs + target_struct_size;
	   ++t)
	{
	  if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
	    {
	      runable = *t;
	      ++count;
	    }
	}

      if (count != 1)
	runable = NULL;
    }

  if (runable == NULL)
    {
      if (do_mesg)
	error (_("Don't know how to %s.  Try \"help target\"."), do_mesg);
      else
	return NULL;
    }

  return runable;
}
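
/* In a typical native build the loop above selects exactly one
   candidate: the native target (for instance linux-nat on GNU/Linux),
   whose to_can_run method is a real implementation rather than the
   generated delegate_can_run stub.  If no candidate, or more than
   one, is linked in, nothing is chosen automatically and the caller
   either gets NULL or the "Don't know how to ..." error, depending
   on DO_MESG.  */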
struct target_ops *
find_attach_target (void)
{
  struct target_ops *t;

  /* If a target on the current stack can attach, use it.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_attach != NULL)
	break;
    }

  /* Otherwise, use the default run target for attaching.  */
  if (t == NULL)
    t = find_default_run_target ("attach");

  return t;
}

struct target_ops *
find_run_target (void)
{
  struct target_ops *t;

  /* If a target on the current stack can attach, use it.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	break;
    }

  /* Otherwise, use the default run target.  */
  if (t == NULL)
    t = find_default_run_target ("run");

  return t;
}
/* Implement the "info proc" command.  */

int
target_info_proc (const char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}

int
target_supports_disable_randomization (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_disable_randomization)
      return t->to_supports_disable_randomization (t);

  return 0;
}

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
static struct address_space *
default_thread_address_space (struct target_ops *self, ptid_t ptid)
{
  struct inferior *inf;

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}

/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;

  aspace = current_target.to_thread_address_space (&current_target, ptid);
  gdb_assert (aspace != NULL);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_thread_address_space (%s) = %d\n",
			target_pid_to_str (ptid),
			address_space_num (aspace));

  return aspace;
}
/* Target file operations.  */

static struct target_ops *
default_fileio_target (void)
{
  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  if (current_target.to_stratum >= process_stratum)
    return current_target.beneath;
  else
    return find_default_run_target ("file I/O");
}
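
/* Taken together, the target_fileio_* wrappers below give GDB
   open/pread/pwrite/close-style access to files in the target's
   filesystem; remote targets typically implement them with the
   remote protocol's vFile ("Host I/O") packets.  A minimal usage
   sketch, assuming the connected target supports file I/O (the path
   and buffer size are only illustrative):

     int target_errno;
     int fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY, 0,
				  &target_errno);

     if (fd != -1)
       {
	 gdb_byte buf[128];
	 int n = target_fileio_pread (fd, buf, sizeof buf, 0, &target_errno);

	 // ... use the N bytes in BUF ...
	 target_fileio_close (fd, &target_errno);
       }
*/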
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  */

int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */

int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */

int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */

int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */

int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  */

char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret ? ret : "(nil)",
				ret ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}

/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}
    }
}
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}

/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}

static int
return_zero (struct target_ops *ignore)
{
  return 0;
}

static int
return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
{
  return 0;
}
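
/* For example, on a 64-bit target gdbarch_ptr_bit () is 64, so the
   default region check above accepts hardware watchpoints covering
   at most 64 / TARGET_CHAR_BIT = 8 bytes; on a 32-bit target the
   limit is 4 bytes.  Individual targets override the method when
   their hardware can watch larger or differently aligned regions.  */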
/* Find the next target down the stack from the specified target.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}

struct target_ops *
find_target_at (enum strata stratum)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stratum == stratum)
      return t;

  return NULL;
}
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}

/* Error-catcher for target_find_memory_regions.  */

static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  */

static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_has_all_memory = return_zero;
  dummy_target.to_has_memory = return_zero;
  dummy_target.to_has_stack = return_zero;
  dummy_target.to_has_registers = return_zero;
  dummy_target.to_has_execution = return_zero_has_execution;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
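
/* The dummy target sits at dummy_stratum, the very bottom of the
   stack, so there is always at least one target installed; its
   return_zero defaults make GDB report "no memory / no stack / no
   registers / not executing" until a real target (exec, core,
   remote, native, ...) is pushed above it.  */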
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
int
target_thread_alive (ptid_t ptid)
{
  int retval;

  retval = current_target.to_thread_alive (&current_target, ptid);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
			ptid_get_pid (ptid), retval);

  return retval;
}

void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
/* Concatenate ELEM to LIST, a comma-separated list, and return the
   result.  The LIST incoming argument is released.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}

void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_store_registers", regcache, regno);
}

int
target_core_of_thread (ptid_t ptid)
{
  int retval = current_target.to_core_of_thread (&current_target, ptid);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_core_of_thread (%d) = %d\n",
			ptid_get_pid (ptid), retval);
  return retval;
}
static int
simple_verify_memory (struct target_ops *ops,
		      const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
{
  LONGEST total_xfered = 0;

  while (total_xfered < size)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;
      gdb_byte buf[1024];
      ULONGEST howmuch = min (sizeof (buf), size - total_xfered);

      status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				    buf, NULL, lma + total_xfered, howmuch,
				    &xfered_len);
      if (status == TARGET_XFER_OK
	  && memcmp (data + total_xfered, buf, xfered_len) == 0)
	{
	  total_xfered += xfered_len;
	  QUIT;
	}
      else
	return 0;
    }

  return 1;
}

/* Default implementation of memory verification.  */

static int
default_verify_memory (struct target_ops *self,
		       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  /* Start over from the top of the target stack.  */
  return simple_verify_memory (current_target.beneath,
			       data, memaddr, size);
}
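
/* simple_verify_memory compares at most 1 KiB per target_xfer_partial
   call, so verifying a large region costs many round trips; a target
   such as remote.c can instead implement to_verify_memory with a
   single checksum request (its qCRC packet) and only fall back to
   this chunked comparison when that is unsupported.  */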
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size), retval);
  return retval;
}
3342 /* The documentation for this function is in its prototype declaration in
3346 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3350 ret
= current_target
.to_insert_mask_watchpoint (¤t_target
,
3354 fprintf_unfiltered (gdb_stdlog
, "\
3355 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3356 core_addr_to_string (addr
),
3357 core_addr_to_string (mask
), rw
, ret
);
3362 /* The documentation for this function is in its prototype declaration in
3366 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3370 ret
= current_target
.to_remove_mask_watchpoint (¤t_target
,
3374 fprintf_unfiltered (gdb_stdlog
, "\
3375 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3376 core_addr_to_string (addr
),
3377 core_addr_to_string (mask
), rw
, ret
);
3382 /* The documentation for this function is in its prototype declaration
3386 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3388 return current_target
.to_masked_watch_num_registers (¤t_target
,
3392 /* The documentation for this function is in its prototype declaration
3396 target_ranged_break_num_registers (void)
3398 return current_target
.to_ranged_break_num_registers (¤t_target
);
3403 struct btrace_target_info
*
3404 target_enable_btrace (ptid_t ptid
)
3406 return current_target
.to_enable_btrace (¤t_target
, ptid
);
3412 target_disable_btrace (struct btrace_target_info
*btinfo
)
3414 current_target
.to_disable_btrace (¤t_target
, btinfo
);
3420 target_teardown_btrace (struct btrace_target_info
*btinfo
)
3422 current_target
.to_teardown_btrace (¤t_target
, btinfo
);
3428 target_read_btrace (VEC (btrace_block_s
) **btrace
,
3429 struct btrace_target_info
*btinfo
,
3430 enum btrace_read_type type
)
3432 return current_target
.to_read_btrace (¤t_target
, btrace
, btinfo
, type
);
3438 target_stop_recording (void)
3440 current_target
.to_stop_recording (¤t_target
);
3446 target_save_record (const char *filename
)
3448 current_target
.to_save_record (¤t_target
, filename
);
3454 target_supports_delete_record (void)
3456 struct target_ops
*t
;
3458 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3459 if (t
->to_delete_record
!= NULL
)
3468 target_delete_record (void)
3470 current_target
.to_delete_record (¤t_target
);
3476 target_record_is_replaying (void)
3478 return current_target
.to_record_is_replaying (¤t_target
);
3484 target_goto_record_begin (void)
3486 current_target
.to_goto_record_begin (¤t_target
);
3492 target_goto_record_end (void)
3494 current_target
.to_goto_record_end (¤t_target
);
3500 target_goto_record (ULONGEST insn
)
3502 current_target
.to_goto_record (¤t_target
, insn
);
3508 target_insn_history (int size
, int flags
)
3510 current_target
.to_insn_history (¤t_target
, size
, flags
);
3516 target_insn_history_from (ULONGEST from
, int size
, int flags
)
3518 current_target
.to_insn_history_from (¤t_target
, from
, size
, flags
);
3524 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
3526 current_target
.to_insn_history_range (¤t_target
, begin
, end
, flags
);
3532 target_call_history (int size
, int flags
)
3534 current_target
.to_call_history (¤t_target
, size
, flags
);
3540 target_call_history_from (ULONGEST begin
, int size
, int flags
)
3542 current_target
.to_call_history_from (¤t_target
, begin
, size
, flags
);
3548 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
3550 current_target
.to_call_history_range (¤t_target
, begin
, end
, flags
);
3554 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
3556 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
3558 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
3563 const struct frame_unwind
*
3564 target_get_unwinder (void)
3566 return current_target
.to_get_unwinder (¤t_target
);
3571 const struct frame_unwind
*
3572 target_get_tailcall_unwinder (void)
3574 return current_target
.to_get_tailcall_unwinder (¤t_target
);
3577 /* Default implementation of to_decr_pc_after_break. */
3580 default_target_decr_pc_after_break (struct target_ops
*ops
,
3581 struct gdbarch
*gdbarch
)
3583 return gdbarch_decr_pc_after_break (gdbarch
);
3589 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
3591 return current_target
.to_decr_pc_after_break (¤t_target
, gdbarch
);
3597 target_prepare_to_generate_core (void)
3599 current_target
.to_prepare_to_generate_core (¤t_target
);
3605 target_done_generating_core (void)
3607 current_target
.to_done_generating_core (¤t_target
);
3611 debug_to_files_info (struct target_ops
*target
)
3613 debug_target
.to_files_info (target
);
3615 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
3619 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
3620 struct bp_target_info
*bp_tgt
)
3624 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
3626 fprintf_unfiltered (gdb_stdlog
,
3627 "target_insert_breakpoint (%s, xxx) = %ld\n",
3628 core_addr_to_string (bp_tgt
->placed_address
),
3629 (unsigned long) retval
);
3634 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
3635 struct bp_target_info
*bp_tgt
)
3639 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
3641 fprintf_unfiltered (gdb_stdlog
,
3642 "target_remove_breakpoint (%s, xxx) = %ld\n",
3643 core_addr_to_string (bp_tgt
->placed_address
),
3644 (unsigned long) retval
);
3649 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
3650 int type
, int cnt
, int from_tty
)
3654 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
3655 type
, cnt
, from_tty
);
3657 fprintf_unfiltered (gdb_stdlog
,
3658 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3659 (unsigned long) type
,
3660 (unsigned long) cnt
,
3661 (unsigned long) from_tty
,
3662 (unsigned long) retval
);
3667 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3668 CORE_ADDR addr
, int len
)
3672 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
3675 fprintf_unfiltered (gdb_stdlog
,
3676 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3677 core_addr_to_string (addr
), (unsigned long) len
,
3678 core_addr_to_string (retval
));
3683 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
3684 CORE_ADDR addr
, int len
, int rw
,
3685 struct expression
*cond
)
3689 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
3693 fprintf_unfiltered (gdb_stdlog
,
3694 "target_can_accel_watchpoint_condition "
3695 "(%s, %d, %d, %s) = %ld\n",
3696 core_addr_to_string (addr
), len
, rw
,
3697 host_address_to_string (cond
), (unsigned long) retval
);
3702 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
3706 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
3708 fprintf_unfiltered (gdb_stdlog
,
3709 "target_stopped_by_watchpoint () = %ld\n",
3710 (unsigned long) retval
);
3715 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
3719 retval
= debug_target
.to_stopped_data_address (target
, addr
);
3721 fprintf_unfiltered (gdb_stdlog
,
3722 "target_stopped_data_address ([%s]) = %ld\n",
3723 core_addr_to_string (*addr
),
3724 (unsigned long)retval
);
3729 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
3731 CORE_ADDR start
, int length
)
3735 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
3738 fprintf_filtered (gdb_stdlog
,
3739 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3740 core_addr_to_string (addr
), core_addr_to_string (start
),
3746 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
3747 struct gdbarch
*gdbarch
,
3748 struct bp_target_info
*bp_tgt
)
3752 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
3755 fprintf_unfiltered (gdb_stdlog
,
3756 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3757 core_addr_to_string (bp_tgt
->placed_address
),
3758 (unsigned long) retval
);
3763 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
3764 struct gdbarch
*gdbarch
,
3765 struct bp_target_info
*bp_tgt
)
3769 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
3772 fprintf_unfiltered (gdb_stdlog
,
3773 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3774 core_addr_to_string (bp_tgt
->placed_address
),
3775 (unsigned long) retval
);
3780 debug_to_insert_watchpoint (struct target_ops
*self
,
3781 CORE_ADDR addr
, int len
, int type
,
3782 struct expression
*cond
)
3786 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
3787 addr
, len
, type
, cond
);
3789 fprintf_unfiltered (gdb_stdlog
,
3790 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3791 core_addr_to_string (addr
), len
, type
,
3792 host_address_to_string (cond
), (unsigned long) retval
);
3797 debug_to_remove_watchpoint (struct target_ops
*self
,
3798 CORE_ADDR addr
, int len
, int type
,
3799 struct expression
*cond
)
3803 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
3804 addr
, len
, type
, cond
);
3806 fprintf_unfiltered (gdb_stdlog
,
3807 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3808 core_addr_to_string (addr
), len
, type
,
3809 host_address_to_string (cond
), (unsigned long) retval
);
3814 debug_to_terminal_init (struct target_ops
*self
)
3816 debug_target
.to_terminal_init (&debug_target
);
3818 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
3822 debug_to_terminal_inferior (struct target_ops
*self
)
3824 debug_target
.to_terminal_inferior (&debug_target
);
3826 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
3830 debug_to_terminal_ours_for_output (struct target_ops
*self
)
3832 debug_target
.to_terminal_ours_for_output (&debug_target
);
3834 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
3838 debug_to_terminal_ours (struct target_ops
*self
)
3840 debug_target
.to_terminal_ours (&debug_target
);
3842 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
3846 debug_to_terminal_save_ours (struct target_ops
*self
)
3848 debug_target
.to_terminal_save_ours (&debug_target
);
3850 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
3854 debug_to_terminal_info (struct target_ops
*self
,
3855 const char *arg
, int from_tty
)
3857 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
3859 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
3864 debug_to_load (struct target_ops
*self
, const char *args
, int from_tty
)
3866 debug_target
.to_load (&debug_target
, args
, from_tty
);
3868 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
3872 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
3874 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
3876 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
3877 ptid_get_pid (ptid
));
3881 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
3885 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
3887 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
3894 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
3898 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
3900 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
3907 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
3911 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
3913 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
3920 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
3924 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
3926 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
3933 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
3937 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
3939 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
3946 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
3950 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
3952 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
3959 debug_to_has_exited (struct target_ops
*self
,
3960 int pid
, int wait_status
, int *exit_status
)
3964 has_exited
= debug_target
.to_has_exited (&debug_target
,
3965 pid
, wait_status
, exit_status
);
3967 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
3968 pid
, wait_status
, *exit_status
, has_exited
);
3974 debug_to_can_run (struct target_ops
*self
)
3978 retval
= debug_target
.to_can_run (&debug_target
);
3980 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
3985 static struct gdbarch
*
3986 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3988 struct gdbarch
*retval
;
3990 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
3992 fprintf_unfiltered (gdb_stdlog
,
3993 "target_thread_architecture (%s) = %s [%s]\n",
3994 target_pid_to_str (ptid
),
3995 host_address_to_string (retval
),
3996 gdbarch_bfd_arch_info (retval
)->printable_name
);
4001 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4003 debug_target
.to_stop (&debug_target
, ptid
);
4005 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4006 target_pid_to_str (ptid
));
4010 debug_to_rcmd (struct target_ops
*self
, const char *command
,
4011 struct ui_file
*outbuf
)
4013 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4014 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4018 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4022 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4024 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4031 setup_target_debug (void)
4033 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4035 current_target
.to_open
= debug_to_open
;
4036 current_target
.to_post_attach
= debug_to_post_attach
;
4037 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4038 current_target
.to_files_info
= debug_to_files_info
;
4039 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4040 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4041 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4042 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4043 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4044 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4045 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4046 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4047 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4048 current_target
.to_watchpoint_addr_within_range
4049 = debug_to_watchpoint_addr_within_range
;
4050 current_target
.to_region_ok_for_hw_watchpoint
4051 = debug_to_region_ok_for_hw_watchpoint
;
4052 current_target
.to_can_accel_watchpoint_condition
4053 = debug_to_can_accel_watchpoint_condition
;
4054 current_target
.to_terminal_init
= debug_to_terminal_init
;
4055 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4056 current_target
.to_terminal_ours_for_output
4057 = debug_to_terminal_ours_for_output
;
4058 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4059 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4060 current_target
.to_terminal_info
= debug_to_terminal_info
;
4061 current_target
.to_load
= debug_to_load
;
4062 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4063 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4064 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4065 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4066 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4067 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4068 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4069 current_target
.to_has_exited
= debug_to_has_exited
;
4070 current_target
.to_can_run
= debug_to_can_run
;
4071 current_target
.to_stop
= debug_to_stop
;
4072 current_target
.to_rcmd
= debug_to_rcmd
;
4073 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4074 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
4078 static char targ_desc
[] =
4079 "Names of targets and files being debugged.\nShows the entire \
4080 stack of targets currently in use (including the exec-file,\n\
4081 core-file, and process, if any), as well as the symbol file name.";
4084 default_rcmd (struct target_ops
*self
, const char *command
,
4085 struct ui_file
*output
)
4087 error (_("\"monitor\" command not supported by this target."));
4091 do_monitor_command (char *cmd
,
4094 target_rcmd (cmd
, gdb_stdtarg
);
/* Print the name of each layer of our target stack.  */

static void
maintenance_print_target_stack (char *cmd, int from_tty)
{
  struct target_ops *t;

  printf_filtered (_("The current target stack is:\n"));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      printf_filtered ("  - %s (%s)\n", t->to_shortname, t->to_longname);
    }
}
4112 /* Controls if targets can report that they can/are async. This is
4113 just for maintainers to use when debugging gdb. */
4114 int target_async_permitted
= 1;
4116 /* The set command writes to this variable. If the inferior is
4117 executing, target_async_permitted is *not* updated. */
4118 static int target_async_permitted_1
= 1;
4121 maint_set_target_async_command (char *args
, int from_tty
,
4122 struct cmd_list_element
*c
)
4124 if (have_live_inferiors ())
4126 target_async_permitted_1
= target_async_permitted
;
4127 error (_("Cannot change this setting while the inferior is running."));
4130 target_async_permitted
= target_async_permitted_1
;
4134 maint_show_target_async_command (struct ui_file
*file
, int from_tty
,
4135 struct cmd_list_element
*c
,
4138 fprintf_filtered (file
,
4139 _("Controlling the inferior in "
4140 "asynchronous mode is %s.\n"), value
);
4143 /* Temporary copies of permission settings. */
4145 static int may_write_registers_1
= 1;
4146 static int may_write_memory_1
= 1;
4147 static int may_insert_breakpoints_1
= 1;
4148 static int may_insert_tracepoints_1
= 1;
4149 static int may_insert_fast_tracepoints_1
= 1;
4150 static int may_stop_1
= 1;
4152 /* Make the user-set values match the real values again. */
4155 update_target_permissions (void)
4157 may_write_registers_1
= may_write_registers
;
4158 may_write_memory_1
= may_write_memory
;
4159 may_insert_breakpoints_1
= may_insert_breakpoints
;
4160 may_insert_tracepoints_1
= may_insert_tracepoints
;
4161 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4162 may_stop_1
= may_stop
;
4165 /* The one function handles (most of) the permission flags in the same
4169 set_target_permissions (char *args
, int from_tty
,
4170 struct cmd_list_element
*c
)
4172 if (target_has_execution
)
4174 update_target_permissions ();
4175 error (_("Cannot change this setting while the inferior is running."));
4178 /* Make the real values match the user-changed values. */
4179 may_write_registers
= may_write_registers_1
;
4180 may_insert_breakpoints
= may_insert_breakpoints_1
;
4181 may_insert_tracepoints
= may_insert_tracepoints_1
;
4182 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4183 may_stop
= may_stop_1
;
4184 update_observer_mode ();
4187 /* Set memory write permission independently of observer mode. */
4190 set_write_memory_permission (char *args
, int from_tty
,
4191 struct cmd_list_element
*c
)
4193 /* Make the real values match the user-changed values. */
4194 may_write_memory
= may_write_memory_1
;
4195 update_observer_mode ();
4200 initialize_targets (void)
4202 init_dummy_target ();
4203 push_target (&dummy_target
);
4205 add_info ("target", target_info
, targ_desc
);
4206 add_info ("files", target_info
, targ_desc
);
4208 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4209 Set target debugging."), _("\
4210 Show target debugging."), _("\
4211 When non-zero, target debugging is enabled. Higher numbers are more\n\
4212 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4216 &setdebuglist
, &showdebuglist
);
4218 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4219 &trust_readonly
, _("\
4220 Set mode for reading from readonly sections."), _("\
4221 Show mode for reading from readonly sections."), _("\
4222 When this mode is on, memory reads from readonly sections (such as .text)\n\
4223 will be read from the object file instead of from the target. This will\n\
4224 result in significant performance improvement for remote targets."),
4226 show_trust_readonly
,
4227 &setlist
, &showlist
);
4229 add_com ("monitor", class_obscure
, do_monitor_command
,
4230 _("Send a command to the remote monitor (remote targets only)."));
4232 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
4233 _("Print the name of each layer of the internal target stack."),
4234 &maintenanceprintlist
);
4236 add_setshow_boolean_cmd ("target-async", no_class
,
4237 &target_async_permitted_1
, _("\
4238 Set whether gdb controls the inferior in asynchronous mode."), _("\
4239 Show whether gdb controls the inferior in asynchronous mode."), _("\
4240 Tells gdb whether to control the inferior in asynchronous mode."),
4241 maint_set_target_async_command
,
4242 maint_show_target_async_command
,
4243 &maintenance_set_cmdlist
,
4244 &maintenance_show_cmdlist
);
4246 add_setshow_boolean_cmd ("may-write-registers", class_support
,
4247 &may_write_registers_1
, _("\
4248 Set permission to write into registers."), _("\
4249 Show permission to write into registers."), _("\
4250 When this permission is on, GDB may write into the target's registers.\n\
4251 Otherwise, any sort of write attempt will result in an error."),
4252 set_target_permissions
, NULL
,
4253 &setlist
, &showlist
);
4255 add_setshow_boolean_cmd ("may-write-memory", class_support
,
4256 &may_write_memory_1
, _("\
4257 Set permission to write into target memory."), _("\
4258 Show permission to write into target memory."), _("\
4259 When this permission is on, GDB may write into the target's memory.\n\
4260 Otherwise, any sort of write attempt will result in an error."),
4261 set_write_memory_permission
, NULL
,
4262 &setlist
, &showlist
);
4264 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
4265 &may_insert_breakpoints_1
, _("\
4266 Set permission to insert breakpoints in the target."), _("\
4267 Show permission to insert breakpoints in the target."), _("\
4268 When this permission is on, GDB may insert breakpoints in the program.\n\
4269 Otherwise, any sort of insertion attempt will result in an error."),
4270 set_target_permissions
, NULL
,
4271 &setlist
, &showlist
);
4273 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
4274 &may_insert_tracepoints_1
, _("\
4275 Set permission to insert tracepoints in the target."), _("\
4276 Show permission to insert tracepoints in the target."), _("\
4277 When this permission is on, GDB may insert tracepoints in the program.\n\
4278 Otherwise, any sort of insertion attempt will result in an error."),
4279 set_target_permissions
, NULL
,
4280 &setlist
, &showlist
);
4282 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
4283 &may_insert_fast_tracepoints_1
, _("\
4284 Set permission to insert fast tracepoints in the target."), _("\
4285 Show permission to insert fast tracepoints in the target."), _("\
4286 When this permission is on, GDB may insert fast tracepoints.\n\
4287 Otherwise, any sort of insertion attempt will result in an error."),
4288 set_target_permissions
, NULL
,
4289 &setlist
, &showlist
);
4291 add_setshow_boolean_cmd ("may-interrupt", class_support
,
4293 Set permission to interrupt or signal the target."), _("\
4294 Show permission to interrupt or signal the target."), _("\
4295 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4296 Otherwise, any attempt to interrupt or stop will be ignored."),
4297 set_target_permissions
, NULL
,
4298 &setlist
, &showlist
);
4300 add_setshow_boolean_cmd ("auto-connect-native-target", class_support
,
4301 &auto_connect_native_target
, _("\
4302 Set whether GDB may automatically connect to the native target."), _("\
4303 Show whether GDB may automatically connect to the native target."), _("\
4304 When on, and GDB is not connected to a target yet, GDB\n\
4305 attempts \"run\" and other commands with the native target."),
4306 NULL
, show_auto_connect_native_target
,
4307 &setlist
, &showlist
);