Add target_ops argument to to_get_trace_state_variable_value
[deliverable/binutils-gdb.git] / gdb / target.c
1/* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include <errno.h>
24#include <string.h>
25#include "target.h"
26#include "target-dcache.h"
27#include "gdbcmd.h"
28#include "symtab.h"
29#include "inferior.h"
30#include "bfd.h"
31#include "symfile.h"
32#include "objfiles.h"
33#include "dcache.h"
34#include <signal.h>
35#include "regcache.h"
36#include "gdb_assert.h"
37#include "gdbcore.h"
38#include "exceptions.h"
39#include "target-descriptions.h"
40#include "gdbthread.h"
41#include "solib.h"
42#include "exec.h"
43#include "inline-frame.h"
44#include "tracepoint.h"
45#include "gdb/fileio.h"
46#include "agent.h"
47
48static void target_info (char *, int);
49
50static void default_terminal_info (struct target_ops *, const char *, int);
51
52static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58static void tcomplain (void) ATTRIBUTE_NORETURN;
59
60static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62static int return_zero (void);
63
64static int return_one (void);
65
66static int return_minus_one (void);
67
68static void *return_null (void);
69
70void target_ignore (void);
71
72static void target_command (char *, int);
73
74static struct target_ops *find_default_run_target (char *);
75
76static target_xfer_partial_ftype default_xfer_partial;
77
78static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81static int find_default_can_async_p (struct target_ops *ignore);
82
83static int find_default_is_async_p (struct target_ops *ignore);
84
85#include "target-delegates.c"
86
87static void init_dummy_target (void);
88
89static struct target_ops debug_target;
90
91static void debug_to_open (char *, int);
92
93static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96static void debug_to_files_info (struct target_ops *);
97
98static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135static void debug_to_terminal_init (struct target_ops *self);
136
137static void debug_to_terminal_inferior (struct target_ops *self);
138
139static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141static void debug_to_terminal_save_ours (struct target_ops *self);
142
143static void debug_to_terminal_ours (struct target_ops *self);
144
145static void debug_to_load (struct target_ops *self, char *, int);
146
147static int debug_to_can_run (struct target_ops *self);
148
149static void debug_to_stop (struct target_ops *self, ptid_t);
150
151/* Pointer to array of target architecture structures; the number of
 152 entries currently in use in that array; and the allocated size of
 153 the array. */
154struct target_ops **target_structs;
155unsigned target_struct_size;
156unsigned target_struct_allocsize;
157#define DEFAULT_ALLOCSIZE 10
158
159/* The initial current target, so that there is always a semi-valid
160 current target. */
161
162static struct target_ops dummy_target;
163
164/* Top of target stack. */
165
166static struct target_ops *target_stack;
167
168/* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171struct target_ops current_target;
172
173/* Command list for target. */
174
175static struct cmd_list_element *targetlist = NULL;
176
177/* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180static int trust_readonly = 0;
181
182/* Nonzero if we should show true memory content including
 183 memory breakpoints inserted by GDB. */
184
185static int show_memory_breakpoints = 0;
186
187/* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
 189 inadvertent disruption, such as in non-stop mode. */
190
191int may_write_registers = 1;
192
193int may_write_memory = 1;
194
195int may_insert_breakpoints = 1;
196
197int may_insert_tracepoints = 1;
198
199int may_insert_fast_tracepoints = 1;
200
201int may_stop = 1;
202
203/* Non-zero if we want to see trace of target level stuff. */
204
205static unsigned int targetdebug = 0;
206static void
207show_targetdebug (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
209{
210 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
211}
212
213static void setup_target_debug (void);
214
215/* The user just typed 'target' without the name of a target. */
216
217static void
218target_command (char *arg, int from_tty)
219{
220 fputs_filtered ("Argument required (target name). Try `help target'\n",
221 gdb_stdout);
222}
223
224/* Default target_has_* methods for process_stratum targets. */
225
226int
227default_child_has_all_memory (struct target_ops *ops)
228{
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234}
235
236int
237default_child_has_memory (struct target_ops *ops)
238{
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244}
245
246int
247default_child_has_stack (struct target_ops *ops)
248{
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254}
255
256int
257default_child_has_registers (struct target_ops *ops)
258{
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264}
265
266int
267default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268{
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275}
276
277
278int
279target_has_all_memory_1 (void)
280{
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288}
289
290int
291target_has_memory_1 (void)
292{
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300}
301
302int
303target_has_stack_1 (void)
304{
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312}
313
314int
315target_has_registers_1 (void)
316{
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324}
325
326int
327target_has_execution_1 (ptid_t the_ptid)
328{
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336}
337
338int
339target_has_execution_current (void)
340{
341 return target_has_execution_1 (inferior_ptid);
342}
343
344/* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
346
347void
348complete_target_initialization (struct target_ops *t)
349{
350 /* Provide default values for all "must have" methods. */
351 if (t->to_xfer_partial == NULL)
352 t->to_xfer_partial = default_xfer_partial;
353
354 if (t->to_has_all_memory == NULL)
355 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
356
357 if (t->to_has_memory == NULL)
358 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
359
360 if (t->to_has_stack == NULL)
361 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
362
363 if (t->to_has_registers == NULL)
364 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
365
366 if (t->to_has_execution == NULL)
367 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
368
369 install_delegators (t);
370}
371
372/* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
375
376void
377add_target_with_completer (struct target_ops *t,
378 completer_ftype *completer)
379{
380 struct cmd_list_element *c;
381
382 complete_target_initialization (t);
383
384 if (!target_structs)
385 {
386 target_struct_allocsize = DEFAULT_ALLOCSIZE;
387 target_structs = (struct target_ops **) xmalloc
388 (target_struct_allocsize * sizeof (*target_structs));
389 }
390 if (target_struct_size >= target_struct_allocsize)
391 {
392 target_struct_allocsize *= 2;
393 target_structs = (struct target_ops **)
394 xrealloc ((char *) target_structs,
395 target_struct_allocsize * sizeof (*target_structs));
396 }
397 target_structs[target_struct_size++] = t;
398
399 if (targetlist == NULL)
400 add_prefix_cmd ("target", class_run, target_command, _("\
401Connect to a target machine or process.\n\
402The first argument is the type or protocol of the target machine.\n\
403Remaining arguments are interpreted by the target protocol. For more\n\
404information on the arguments for a particular protocol, type\n\
405`help target ' followed by the protocol name."),
406 &targetlist, "target ", 0, &cmdlist);
407 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
408 &targetlist);
409 if (completer != NULL)
410 set_cmd_completer (c, completer);
411}
412
413/* Add a possible target architecture to the list. */
414
415void
416add_target (struct target_ops *t)
417{
418 add_target_with_completer (t, NULL);
419}
420
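/* Illustrative sketch (editor's addition, not part of target.c): the
   usual registration pattern in a backend's _initialize function.
   "example_ops" and "init_example_ops" are hypothetical names.
   add_target calls complete_target_initialization, which supplies
   defaults for to_xfer_partial and the to_has_* methods.  */

static struct target_ops example_ops;

static void
init_example_ops (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Example target";
  example_ops.to_doc = "An illustrative target backend.";
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;

  add_target (&example_ops);
}
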
421/* See target.h. */
422
423void
424add_deprecated_target_alias (struct target_ops *t, char *alias)
425{
426 struct cmd_list_element *c;
427 char *alt;
428
 429 /* If we use add_alias_cmd here, we do not get the deprecated warning;
 430 see PR cli/15104. */
431 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
432 alt = xstrprintf ("target %s", t->to_shortname);
433 deprecate_cmd (c, alt);
434}
435
436/* Stub functions */
437
438void
439target_ignore (void)
440{
441}
442
443void
444target_kill (void)
445{
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459}
460
461void
462target_load (char *arg, int from_tty)
463{
464 target_dcache_invalidate ();
465 (*current_target.to_load) (&current_target, arg, from_tty);
466}
467
468void
469target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471{
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489}
490
491void
492target_terminal_inferior (void)
493{
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution)
499 return;
500
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target.to_terminal_inferior) (&current_target);
504}
505
506static int
507nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
508 struct target_ops *t)
509{
510 errno = EIO; /* Can't read/write this location. */
511 return 0; /* No bytes handled. */
512}
513
514static void
515tcomplain (void)
516{
517 error (_("You can't do that when your target is `%s'"),
518 current_target.to_shortname);
519}
520
521void
522noprocess (void)
523{
524 error (_("You can't do that without a process to debug."));
525}
526
527static void
528default_terminal_info (struct target_ops *self, const char *args, int from_tty)
529{
530 printf_unfiltered (_("No saved terminal information.\n"));
531}
532
533/* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
539static ptid_t
540default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
541{
542 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
543}
544
545static enum exec_direction_kind
546default_execution_direction (struct target_ops *self)
547{
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554to_execution_direction must be implemented for reverse async");
555}
556
557/* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
 565 Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
569static void
570update_current_target (void)
571{
572 struct target_ops *t;
573
574 /* First, reset current's contents. */
575 memset (&current_target, 0, sizeof (current_target));
576
577 /* Install the delegators. */
578 install_delegators (&current_target);
579
580#define INHERIT(FIELD, TARGET) \
581 if (!current_target.FIELD) \
582 current_target.FIELD = (TARGET)->FIELD
583
584 for (t = target_stack; t; t = t->beneath)
585 {
586 INHERIT (to_shortname, t);
587 INHERIT (to_longname, t);
588 INHERIT (to_doc, t);
589 /* Do not inherit to_open. */
590 /* Do not inherit to_close. */
591 /* Do not inherit to_attach. */
592 INHERIT (to_post_attach, t);
593 INHERIT (to_attach_no_wait, t);
594 /* Do not inherit to_detach. */
595 /* Do not inherit to_disconnect. */
596 /* Do not inherit to_resume. */
597 /* Do not inherit to_wait. */
598 /* Do not inherit to_fetch_registers. */
599 /* Do not inherit to_store_registers. */
600 INHERIT (to_prepare_to_store, t);
601 INHERIT (deprecated_xfer_memory, t);
602 INHERIT (to_files_info, t);
603 /* Do not inherit to_insert_breakpoint. */
604 /* Do not inherit to_remove_breakpoint. */
605 INHERIT (to_can_use_hw_breakpoint, t);
606 INHERIT (to_insert_hw_breakpoint, t);
607 INHERIT (to_remove_hw_breakpoint, t);
608 /* Do not inherit to_ranged_break_num_registers. */
609 INHERIT (to_insert_watchpoint, t);
610 INHERIT (to_remove_watchpoint, t);
611 /* Do not inherit to_insert_mask_watchpoint. */
612 /* Do not inherit to_remove_mask_watchpoint. */
613 /* Do not inherit to_stopped_data_address. */
614 INHERIT (to_have_steppable_watchpoint, t);
615 INHERIT (to_have_continuable_watchpoint, t);
616 /* Do not inherit to_stopped_by_watchpoint. */
617 INHERIT (to_watchpoint_addr_within_range, t);
618 INHERIT (to_region_ok_for_hw_watchpoint, t);
619 INHERIT (to_can_accel_watchpoint_condition, t);
620 /* Do not inherit to_masked_watch_num_registers. */
621 INHERIT (to_terminal_init, t);
622 INHERIT (to_terminal_inferior, t);
623 INHERIT (to_terminal_ours_for_output, t);
624 INHERIT (to_terminal_ours, t);
625 INHERIT (to_terminal_save_ours, t);
626 INHERIT (to_terminal_info, t);
627 /* Do not inherit to_kill. */
628 INHERIT (to_load, t);
 629 /* Do not inherit to_create_inferior. */
630 INHERIT (to_post_startup_inferior, t);
631 INHERIT (to_insert_fork_catchpoint, t);
632 INHERIT (to_remove_fork_catchpoint, t);
633 INHERIT (to_insert_vfork_catchpoint, t);
634 INHERIT (to_remove_vfork_catchpoint, t);
635 /* Do not inherit to_follow_fork. */
636 INHERIT (to_insert_exec_catchpoint, t);
637 INHERIT (to_remove_exec_catchpoint, t);
638 INHERIT (to_set_syscall_catchpoint, t);
639 INHERIT (to_has_exited, t);
640 /* Do not inherit to_mourn_inferior. */
641 INHERIT (to_can_run, t);
642 /* Do not inherit to_pass_signals. */
643 /* Do not inherit to_program_signals. */
644 /* Do not inherit to_thread_alive. */
645 /* Do not inherit to_find_new_threads. */
646 /* Do not inherit to_pid_to_str. */
647 INHERIT (to_extra_thread_info, t);
648 INHERIT (to_thread_name, t);
649 INHERIT (to_stop, t);
650 /* Do not inherit to_xfer_partial. */
651 INHERIT (to_rcmd, t);
652 INHERIT (to_pid_to_exec_file, t);
653 INHERIT (to_log_command, t);
654 INHERIT (to_stratum, t);
655 /* Do not inherit to_has_all_memory. */
656 /* Do not inherit to_has_memory. */
657 /* Do not inherit to_has_stack. */
658 /* Do not inherit to_has_registers. */
659 /* Do not inherit to_has_execution. */
660 INHERIT (to_has_thread_control, t);
661 /* Do not inherit to_can_async_p. */
662 /* Do not inherit to_is_async_p. */
663 /* Do not inherit to_async. */
664 INHERIT (to_find_memory_regions, t);
665 INHERIT (to_make_corefile_notes, t);
666 INHERIT (to_get_bookmark, t);
667 INHERIT (to_goto_bookmark, t);
668 /* Do not inherit to_get_thread_local_address. */
669 INHERIT (to_can_execute_reverse, t);
670 INHERIT (to_execution_direction, t);
671 INHERIT (to_thread_architecture, t);
672 /* Do not inherit to_read_description. */
673 INHERIT (to_get_ada_task_ptid, t);
674 /* Do not inherit to_search_memory. */
675 INHERIT (to_supports_multi_process, t);
676 INHERIT (to_supports_enable_disable_tracepoint, t);
677 INHERIT (to_supports_string_tracing, t);
678 INHERIT (to_trace_init, t);
679 INHERIT (to_download_tracepoint, t);
680 INHERIT (to_can_download_tracepoint, t);
681 INHERIT (to_download_trace_state_variable, t);
682 INHERIT (to_enable_tracepoint, t);
683 INHERIT (to_disable_tracepoint, t);
684 INHERIT (to_trace_set_readonly_regions, t);
685 INHERIT (to_trace_start, t);
686 INHERIT (to_get_trace_status, t);
687 INHERIT (to_get_tracepoint_status, t);
688 INHERIT (to_trace_stop, t);
689 INHERIT (to_trace_find, t);
690 INHERIT (to_get_trace_state_variable_value, t);
691 INHERIT (to_save_trace_data, t);
692 INHERIT (to_upload_tracepoints, t);
693 INHERIT (to_upload_trace_state_variables, t);
694 INHERIT (to_get_raw_trace_data, t);
695 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
696 INHERIT (to_set_disconnected_tracing, t);
697 INHERIT (to_set_circular_trace_buffer, t);
698 INHERIT (to_set_trace_buffer_size, t);
699 INHERIT (to_set_trace_notes, t);
700 INHERIT (to_get_tib_address, t);
701 INHERIT (to_set_permissions, t);
702 INHERIT (to_static_tracepoint_marker_at, t);
703 INHERIT (to_static_tracepoint_markers_by_strid, t);
704 INHERIT (to_traceframe_info, t);
705 INHERIT (to_use_agent, t);
706 INHERIT (to_can_use_agent, t);
707 INHERIT (to_augmented_libraries_svr4_read, t);
708 INHERIT (to_magic, t);
709 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
710 INHERIT (to_can_run_breakpoint_commands, t);
711 /* Do not inherit to_memory_map. */
712 /* Do not inherit to_flash_erase. */
713 /* Do not inherit to_flash_done. */
714 }
715#undef INHERIT
716
717 /* Clean up a target struct so it no longer has any zero pointers in
 718 it. Some entries are defaulted to a method that prints an error,
719 others are hard-wired to a standard recursive default. */
720
721#define de_fault(field, value) \
722 if (!current_target.field) \
723 current_target.field = value
724
725 de_fault (to_open,
726 (void (*) (char *, int))
727 tcomplain);
728 de_fault (to_close,
729 (void (*) (struct target_ops *))
730 target_ignore);
731 de_fault (to_post_attach,
732 (void (*) (struct target_ops *, int))
733 target_ignore);
734 de_fault (to_prepare_to_store,
735 (void (*) (struct target_ops *, struct regcache *))
736 noprocess);
737 de_fault (deprecated_xfer_memory,
738 (int (*) (CORE_ADDR, gdb_byte *, int, int,
739 struct mem_attrib *, struct target_ops *))
740 nomemory);
741 de_fault (to_files_info,
742 (void (*) (struct target_ops *))
743 target_ignore);
744 de_fault (to_can_use_hw_breakpoint,
745 (int (*) (struct target_ops *, int, int, int))
746 return_zero);
747 de_fault (to_insert_hw_breakpoint,
748 (int (*) (struct target_ops *, struct gdbarch *,
749 struct bp_target_info *))
750 return_minus_one);
751 de_fault (to_remove_hw_breakpoint,
752 (int (*) (struct target_ops *, struct gdbarch *,
753 struct bp_target_info *))
754 return_minus_one);
755 de_fault (to_insert_watchpoint,
756 (int (*) (struct target_ops *, CORE_ADDR, int, int,
757 struct expression *))
758 return_minus_one);
759 de_fault (to_remove_watchpoint,
760 (int (*) (struct target_ops *, CORE_ADDR, int, int,
761 struct expression *))
762 return_minus_one);
763 de_fault (to_watchpoint_addr_within_range,
764 default_watchpoint_addr_within_range);
765 de_fault (to_region_ok_for_hw_watchpoint,
766 default_region_ok_for_hw_watchpoint);
767 de_fault (to_can_accel_watchpoint_condition,
768 (int (*) (struct target_ops *, CORE_ADDR, int, int,
769 struct expression *))
770 return_zero);
771 de_fault (to_terminal_init,
772 (void (*) (struct target_ops *))
773 target_ignore);
774 de_fault (to_terminal_inferior,
775 (void (*) (struct target_ops *))
776 target_ignore);
777 de_fault (to_terminal_ours_for_output,
778 (void (*) (struct target_ops *))
779 target_ignore);
780 de_fault (to_terminal_ours,
781 (void (*) (struct target_ops *))
782 target_ignore);
783 de_fault (to_terminal_save_ours,
784 (void (*) (struct target_ops *))
785 target_ignore);
786 de_fault (to_terminal_info,
787 default_terminal_info);
788 de_fault (to_load,
789 (void (*) (struct target_ops *, char *, int))
790 tcomplain);
791 de_fault (to_post_startup_inferior,
792 (void (*) (struct target_ops *, ptid_t))
793 target_ignore);
794 de_fault (to_insert_fork_catchpoint,
795 (int (*) (struct target_ops *, int))
796 return_one);
797 de_fault (to_remove_fork_catchpoint,
798 (int (*) (struct target_ops *, int))
799 return_one);
800 de_fault (to_insert_vfork_catchpoint,
801 (int (*) (struct target_ops *, int))
802 return_one);
803 de_fault (to_remove_vfork_catchpoint,
804 (int (*) (struct target_ops *, int))
805 return_one);
806 de_fault (to_insert_exec_catchpoint,
807 (int (*) (struct target_ops *, int))
808 return_one);
809 de_fault (to_remove_exec_catchpoint,
810 (int (*) (struct target_ops *, int))
811 return_one);
812 de_fault (to_set_syscall_catchpoint,
813 (int (*) (struct target_ops *, int, int, int, int, int *))
814 return_one);
815 de_fault (to_has_exited,
816 (int (*) (struct target_ops *, int, int, int *))
817 return_zero);
818 de_fault (to_can_run,
819 (int (*) (struct target_ops *))
820 return_zero);
821 de_fault (to_extra_thread_info,
822 (char *(*) (struct target_ops *, struct thread_info *))
823 return_null);
824 de_fault (to_thread_name,
825 (char *(*) (struct target_ops *, struct thread_info *))
826 return_null);
827 de_fault (to_stop,
828 (void (*) (struct target_ops *, ptid_t))
829 target_ignore);
830 de_fault (to_rcmd,
831 (void (*) (struct target_ops *, char *, struct ui_file *))
832 tcomplain);
833 de_fault (to_pid_to_exec_file,
834 (char *(*) (struct target_ops *, int))
835 return_null);
836 de_fault (to_thread_architecture,
837 default_thread_architecture);
838 current_target.to_read_description = NULL;
839 de_fault (to_get_ada_task_ptid,
840 (ptid_t (*) (struct target_ops *, long, long))
841 default_get_ada_task_ptid);
842 de_fault (to_supports_multi_process,
843 (int (*) (struct target_ops *))
844 return_zero);
845 de_fault (to_supports_enable_disable_tracepoint,
846 (int (*) (struct target_ops *))
847 return_zero);
848 de_fault (to_supports_string_tracing,
849 (int (*) (struct target_ops *))
850 return_zero);
851 de_fault (to_trace_init,
852 (void (*) (struct target_ops *))
853 tcomplain);
854 de_fault (to_download_tracepoint,
855 (void (*) (struct target_ops *, struct bp_location *))
856 tcomplain);
857 de_fault (to_can_download_tracepoint,
858 (int (*) (struct target_ops *))
859 return_zero);
860 de_fault (to_download_trace_state_variable,
861 (void (*) (struct target_ops *, struct trace_state_variable *))
862 tcomplain);
863 de_fault (to_enable_tracepoint,
864 (void (*) (struct target_ops *, struct bp_location *))
865 tcomplain);
866 de_fault (to_disable_tracepoint,
867 (void (*) (struct target_ops *, struct bp_location *))
868 tcomplain);
869 de_fault (to_trace_set_readonly_regions,
870 (void (*) (struct target_ops *))
871 tcomplain);
872 de_fault (to_trace_start,
873 (void (*) (struct target_ops *))
874 tcomplain);
875 de_fault (to_get_trace_status,
876 (int (*) (struct target_ops *, struct trace_status *))
877 return_minus_one);
878 de_fault (to_get_tracepoint_status,
879 (void (*) (struct target_ops *, struct breakpoint *,
880 struct uploaded_tp *))
881 tcomplain);
882 de_fault (to_trace_stop,
883 (void (*) (struct target_ops *))
884 tcomplain);
885 de_fault (to_trace_find,
886 (int (*) (struct target_ops *,
887 enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
888 return_minus_one);
889 de_fault (to_get_trace_state_variable_value,
890 (int (*) (struct target_ops *, int, LONGEST *))
891 return_zero);
892 de_fault (to_save_trace_data,
893 (int (*) (const char *))
894 tcomplain);
895 de_fault (to_upload_tracepoints,
896 (int (*) (struct uploaded_tp **))
897 return_zero);
898 de_fault (to_upload_trace_state_variables,
899 (int (*) (struct uploaded_tsv **))
900 return_zero);
901 de_fault (to_get_raw_trace_data,
902 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
903 tcomplain);
904 de_fault (to_get_min_fast_tracepoint_insn_len,
905 (int (*) (void))
906 return_minus_one);
907 de_fault (to_set_disconnected_tracing,
908 (void (*) (int))
909 target_ignore);
910 de_fault (to_set_circular_trace_buffer,
911 (void (*) (int))
912 target_ignore);
913 de_fault (to_set_trace_buffer_size,
914 (void (*) (LONGEST))
915 target_ignore);
916 de_fault (to_set_trace_notes,
917 (int (*) (const char *, const char *, const char *))
918 return_zero);
919 de_fault (to_get_tib_address,
920 (int (*) (ptid_t, CORE_ADDR *))
921 tcomplain);
922 de_fault (to_set_permissions,
923 (void (*) (void))
924 target_ignore);
925 de_fault (to_static_tracepoint_marker_at,
926 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
927 return_zero);
928 de_fault (to_static_tracepoint_markers_by_strid,
929 (VEC(static_tracepoint_marker_p) * (*) (const char *))
930 tcomplain);
931 de_fault (to_traceframe_info,
932 (struct traceframe_info * (*) (void))
933 return_null);
934 de_fault (to_supports_evaluation_of_breakpoint_conditions,
935 (int (*) (struct target_ops *))
936 return_zero);
937 de_fault (to_can_run_breakpoint_commands,
938 (int (*) (struct target_ops *))
939 return_zero);
940 de_fault (to_use_agent,
941 (int (*) (int))
942 tcomplain);
943 de_fault (to_can_use_agent,
944 (int (*) (void))
945 return_zero);
946 de_fault (to_augmented_libraries_svr4_read,
947 (int (*) (void))
948 return_zero);
949 de_fault (to_execution_direction, default_execution_direction);
950
951#undef de_fault
952
953 /* Finally, position the target-stack beneath the squashed
954 "current_target". That way code looking for a non-inherited
955 target method can quickly and simply find it. */
956 current_target.beneath = target_stack;
957
958 if (targetdebug)
959 setup_target_debug ();
960}
961
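/* Illustrative sketch (editor's addition, not part of target.c): for a
   single field, the INHERIT and de_fault macros above amount to
   hand-written code of this shape, shown for to_shortname and to_load.  */

static void
example_inherit_and_default (struct target_ops *t)
{
  /* INHERIT (to_shortname, t);  */
  if (!current_target.to_shortname)
    current_target.to_shortname = t->to_shortname;

  /* de_fault (to_load, ... tcomplain);  */
  if (!current_target.to_load)
    current_target.to_load
      = (void (*) (struct target_ops *, char *, int)) tcomplain;
}
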
962/* Push a new target type into the stack of the existing target accessors,
963 possibly superseding some of the existing accessors.
964
965 Rather than allow an empty stack, we always have the dummy target at
966 the bottom stratum, so we can call the function vectors without
967 checking them. */
968
969void
970push_target (struct target_ops *t)
971{
972 struct target_ops **cur;
973
974 /* Check magic number. If wrong, it probably means someone changed
975 the struct definition, but not all the places that initialize one. */
976 if (t->to_magic != OPS_MAGIC)
977 {
978 fprintf_unfiltered (gdb_stderr,
979 "Magic number of %s target struct wrong\n",
980 t->to_shortname);
981 internal_error (__FILE__, __LINE__,
982 _("failed internal consistency check"));
983 }
984
985 /* Find the proper stratum to install this target in. */
986 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
987 {
988 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
989 break;
990 }
991
 992 /* If there are already targets at this stratum, remove them. */
993 /* FIXME: cagney/2003-10-15: I think this should be popping all
994 targets to CUR, and not just those at this stratum level. */
995 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
996 {
997 /* There's already something at this stratum level. Close it,
998 and un-hook it from the stack. */
999 struct target_ops *tmp = (*cur);
1000
1001 (*cur) = (*cur)->beneath;
1002 tmp->beneath = NULL;
1003 target_close (tmp);
1004 }
1005
 1006 /* We have removed all targets in our stratum; now add the new one. */
1007 t->beneath = (*cur);
1008 (*cur) = t;
1009
1010 update_current_target ();
1011}
1012
1013/* Remove a target_ops vector from the stack, wherever it may be.
1014 Return how many times it was removed (0 or 1). */
1015
1016int
1017unpush_target (struct target_ops *t)
1018{
1019 struct target_ops **cur;
1020 struct target_ops *tmp;
1021
1022 if (t->to_stratum == dummy_stratum)
1023 internal_error (__FILE__, __LINE__,
1024 _("Attempt to unpush the dummy target"));
1025
1026 /* Look for the specified target. Note that we assume that a target
1027 can only occur once in the target stack. */
1028
1029 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1030 {
1031 if ((*cur) == t)
1032 break;
1033 }
1034
1035 /* If we don't find target_ops, quit. Only open targets should be
1036 closed. */
1037 if ((*cur) == NULL)
1038 return 0;
1039
1040 /* Unchain the target. */
1041 tmp = (*cur);
1042 (*cur) = (*cur)->beneath;
1043 tmp->beneath = NULL;
1044
1045 update_current_target ();
1046
1047 /* Finally close the target. Note we do this after unchaining, so
1048 any target method calls from within the target_close
1049 implementation don't end up in T anymore. */
1050 target_close (t);
1051
1052 return 1;
1053}
1054
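/* Illustrative sketch (editor's addition, not part of target.c): a
   backend typically pushes its target_ops once a connection is
   established and unpushes it when the connection goes away.  Pushing
   replaces whatever target was previously at the same stratum.  */

static void
example_push_and_unpush (struct target_ops *t)
{
  push_target (t);
  gdb_assert (target_is_pushed (t));

  /* ... the debug session runs here ...  */

  unpush_target (t);
}
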
1055void
1056pop_all_targets_above (enum strata above_stratum)
1057{
1058 while ((int) (current_target.to_stratum) > (int) above_stratum)
1059 {
1060 if (!unpush_target (target_stack))
1061 {
1062 fprintf_unfiltered (gdb_stderr,
1063 "pop_all_targets couldn't find target %s\n",
1064 target_stack->to_shortname);
1065 internal_error (__FILE__, __LINE__,
1066 _("failed internal consistency check"));
1067 break;
1068 }
1069 }
1070}
1071
1072void
1073pop_all_targets (void)
1074{
1075 pop_all_targets_above (dummy_stratum);
1076}
1077
1078/* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1079
1080int
1081target_is_pushed (struct target_ops *t)
1082{
1083 struct target_ops **cur;
1084
1085 /* Check magic number. If wrong, it probably means someone changed
1086 the struct definition, but not all the places that initialize one. */
1087 if (t->to_magic != OPS_MAGIC)
1088 {
1089 fprintf_unfiltered (gdb_stderr,
1090 "Magic number of %s target struct wrong\n",
1091 t->to_shortname);
1092 internal_error (__FILE__, __LINE__,
1093 _("failed internal consistency check"));
1094 }
1095
1096 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1097 if (*cur == t)
1098 return 1;
1099
1100 return 0;
1101}
1102
1103/* Using the objfile specified in OBJFILE, find the address for the
1104 current thread's thread-local storage with offset OFFSET. */
1105CORE_ADDR
1106target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1107{
1108 volatile CORE_ADDR addr = 0;
1109 struct target_ops *target;
1110
1111 for (target = current_target.beneath;
1112 target != NULL;
1113 target = target->beneath)
1114 {
1115 if (target->to_get_thread_local_address != NULL)
1116 break;
1117 }
1118
1119 if (target != NULL
1120 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1121 {
1122 ptid_t ptid = inferior_ptid;
1123 volatile struct gdb_exception ex;
1124
1125 TRY_CATCH (ex, RETURN_MASK_ALL)
1126 {
1127 CORE_ADDR lm_addr;
1128
1129 /* Fetch the load module address for this objfile. */
1130 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1131 objfile);
1132 /* If it's 0, throw the appropriate exception. */
1133 if (lm_addr == 0)
1134 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1135 _("TLS load module not found"));
1136
1137 addr = target->to_get_thread_local_address (target, ptid,
1138 lm_addr, offset);
1139 }
1140 /* If an error occurred, print TLS related messages here. Otherwise,
1141 throw the error to some higher catcher. */
1142 if (ex.reason < 0)
1143 {
1144 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1145
1146 switch (ex.error)
1147 {
1148 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1149 error (_("Cannot find thread-local variables "
1150 "in this thread library."));
1151 break;
1152 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1153 if (objfile_is_library)
1154 error (_("Cannot find shared library `%s' in dynamic"
1155 " linker's load module list"), objfile_name (objfile));
1156 else
1157 error (_("Cannot find executable file `%s' in dynamic"
1158 " linker's load module list"), objfile_name (objfile));
1159 break;
1160 case TLS_NOT_ALLOCATED_YET_ERROR:
1161 if (objfile_is_library)
1162 error (_("The inferior has not yet allocated storage for"
1163 " thread-local variables in\n"
1164 "the shared library `%s'\n"
1165 "for %s"),
1166 objfile_name (objfile), target_pid_to_str (ptid));
1167 else
1168 error (_("The inferior has not yet allocated storage for"
1169 " thread-local variables in\n"
1170 "the executable `%s'\n"
1171 "for %s"),
1172 objfile_name (objfile), target_pid_to_str (ptid));
1173 break;
1174 case TLS_GENERIC_ERROR:
1175 if (objfile_is_library)
1176 error (_("Cannot find thread-local storage for %s, "
1177 "shared library %s:\n%s"),
1178 target_pid_to_str (ptid),
1179 objfile_name (objfile), ex.message);
1180 else
1181 error (_("Cannot find thread-local storage for %s, "
1182 "executable file %s:\n%s"),
1183 target_pid_to_str (ptid),
1184 objfile_name (objfile), ex.message);
1185 break;
1186 default:
1187 throw_exception (ex);
1188 break;
1189 }
1190 }
1191 }
1192 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1193 TLS is an ABI-specific thing. But we don't do that yet. */
1194 else
1195 error (_("Cannot find thread-local variables on this target"));
1196
1197 return addr;
1198}
1199
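/* Illustrative sketch (editor's addition, not part of target.c):
   reading the value of a thread-local variable given its objfile and
   link-time offset, by translating to a runtime address and then
   reading target memory.  The parameter names are hypothetical.  */

static int
example_read_tls_value (struct objfile *tls_objfile, CORE_ADDR tls_offset,
			gdb_byte *buf, ssize_t size)
{
  CORE_ADDR addr = target_translate_tls_address (tls_objfile, tls_offset);

  return target_read_memory (addr, buf, size);
}
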
1200const char *
1201target_xfer_status_to_string (enum target_xfer_status err)
1202{
1203#define CASE(X) case X: return #X
1204 switch (err)
1205 {
1206 CASE(TARGET_XFER_E_IO);
1207 CASE(TARGET_XFER_E_UNAVAILABLE);
1208 default:
1209 return "<unknown>";
1210 }
1211#undef CASE
1212}
1213
1214
1215#undef MIN
1216#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1217
1218/* target_read_string -- read a null-terminated string, up to LEN bytes,
 1219 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
1220 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1221 is responsible for freeing it. Return the number of bytes successfully
1222 read. */
1223
1224int
1225target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1226{
1227 int tlen, offset, i;
1228 gdb_byte buf[4];
1229 int errcode = 0;
1230 char *buffer;
1231 int buffer_allocated;
1232 char *bufptr;
1233 unsigned int nbytes_read = 0;
1234
1235 gdb_assert (string);
1236
1237 /* Small for testing. */
1238 buffer_allocated = 4;
1239 buffer = xmalloc (buffer_allocated);
1240 bufptr = buffer;
1241
1242 while (len > 0)
1243 {
1244 tlen = MIN (len, 4 - (memaddr & 3));
1245 offset = memaddr & 3;
1246
1247 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1248 if (errcode != 0)
1249 {
1250 /* The transfer request might have crossed the boundary to an
1251 unallocated region of memory. Retry the transfer, requesting
1252 a single byte. */
1253 tlen = 1;
1254 offset = 0;
1255 errcode = target_read_memory (memaddr, buf, 1);
1256 if (errcode != 0)
1257 goto done;
1258 }
1259
1260 if (bufptr - buffer + tlen > buffer_allocated)
1261 {
1262 unsigned int bytes;
1263
1264 bytes = bufptr - buffer;
1265 buffer_allocated *= 2;
1266 buffer = xrealloc (buffer, buffer_allocated);
1267 bufptr = buffer + bytes;
1268 }
1269
1270 for (i = 0; i < tlen; i++)
1271 {
1272 *bufptr++ = buf[i + offset];
1273 if (buf[i + offset] == '\000')
1274 {
1275 nbytes_read += i + 1;
1276 goto done;
1277 }
1278 }
1279
1280 memaddr += tlen;
1281 len -= tlen;
1282 nbytes_read += tlen;
1283 }
1284done:
1285 *string = buffer;
1286 if (errnop != NULL)
1287 *errnop = errcode;
1288 return nbytes_read;
1289}
1290
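/* Illustrative sketch (editor's addition, not part of target.c):
   reading a C string from inferior memory.  target_read_string always
   sets *STRING to a malloc'd buffer, so the caller frees it even on
   error.  "example_dump_inferior_string" is a hypothetical helper.  */

static void
example_dump_inferior_string (CORE_ADDR addr)
{
  char *str;
  int err, nbytes;

  nbytes = target_read_string (addr, &str, 256, &err);
  if (err != 0)
    printf_filtered (_("read stopped after %d bytes (errno %d)\n"),
		     nbytes, err);
  else
    printf_filtered (_("read %d bytes\n"), nbytes);
  xfree (str);
}
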
1291struct target_section_table *
1292target_get_section_table (struct target_ops *target)
1293{
1294 struct target_ops *t;
1295
1296 if (targetdebug)
1297 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1298
1299 for (t = target; t != NULL; t = t->beneath)
1300 if (t->to_get_section_table != NULL)
1301 return (*t->to_get_section_table) (t);
1302
1303 return NULL;
1304}
1305
1306/* Find a section containing ADDR. */
1307
1308struct target_section *
1309target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1310{
1311 struct target_section_table *table = target_get_section_table (target);
1312 struct target_section *secp;
1313
1314 if (table == NULL)
1315 return NULL;
1316
1317 for (secp = table->sections; secp < table->sections_end; secp++)
1318 {
1319 if (addr >= secp->addr && addr < secp->endaddr)
1320 return secp;
1321 }
1322 return NULL;
1323}
1324
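/* Illustrative sketch (editor's addition, not part of target.c): using
   the section table to test whether ADDR falls in a read-only section,
   mirroring the SEC_READONLY checks made further down in this file.  */

static int
example_addr_is_readonly (CORE_ADDR addr)
{
  struct target_section *secp
    = target_section_by_addr (current_target.beneath, addr);

  return (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY) != 0);
}
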
1325/* Read memory from the live target, even if currently inspecting a
1326 traceframe. The return is the same as that of target_read. */
1327
1328static enum target_xfer_status
1329target_read_live_memory (enum target_object object,
1330 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1331 ULONGEST *xfered_len)
1332{
1333 enum target_xfer_status ret;
1334 struct cleanup *cleanup;
1335
 1336 /* Switch momentarily out of tfind mode so as to access live memory.
1337 Note that this must not clear global state, such as the frame
1338 cache, which must still remain valid for the previous traceframe.
1339 We may be _building_ the frame cache at this point. */
1340 cleanup = make_cleanup_restore_traceframe_number ();
1341 set_traceframe_number (-1);
1342
1343 ret = target_xfer_partial (current_target.beneath, object, NULL,
1344 myaddr, NULL, memaddr, len, xfered_len);
1345
1346 do_cleanups (cleanup);
1347 return ret;
1348}
1349
1350/* Using the set of read-only target sections of OPS, read live
1351 read-only memory. Note that the actual reads start from the
1352 top-most target again.
1353
1354 For interface/parameters/return description see target.h,
1355 to_xfer_partial. */
1356
1357static enum target_xfer_status
1358memory_xfer_live_readonly_partial (struct target_ops *ops,
1359 enum target_object object,
1360 gdb_byte *readbuf, ULONGEST memaddr,
1361 ULONGEST len, ULONGEST *xfered_len)
1362{
1363 struct target_section *secp;
1364 struct target_section_table *table;
1365
1366 secp = target_section_by_addr (ops, memaddr);
1367 if (secp != NULL
1368 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1369 secp->the_bfd_section)
1370 & SEC_READONLY))
1371 {
1372 struct target_section *p;
1373 ULONGEST memend = memaddr + len;
1374
1375 table = target_get_section_table (ops);
1376
1377 for (p = table->sections; p < table->sections_end; p++)
1378 {
1379 if (memaddr >= p->addr)
1380 {
1381 if (memend <= p->endaddr)
1382 {
1383 /* Entire transfer is within this section. */
1384 return target_read_live_memory (object, memaddr,
1385 readbuf, len, xfered_len);
1386 }
1387 else if (memaddr >= p->endaddr)
1388 {
1389 /* This section ends before the transfer starts. */
1390 continue;
1391 }
1392 else
1393 {
1394 /* This section overlaps the transfer. Just do half. */
1395 len = p->endaddr - memaddr;
1396 return target_read_live_memory (object, memaddr,
1397 readbuf, len, xfered_len);
1398 }
1399 }
1400 }
1401 }
1402
1403 return TARGET_XFER_EOF;
1404}
1405
1406/* Read memory from more than one valid target. A core file, for
 1407 instance, could have some of the memory but delegate other bits to
1408 the target below it. So, we must manually try all targets. */
1409
1410static enum target_xfer_status
1411raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1412 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1413 ULONGEST *xfered_len)
1414{
1415 enum target_xfer_status res;
1416
1417 do
1418 {
1419 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1420 readbuf, writebuf, memaddr, len,
1421 xfered_len);
1422 if (res == TARGET_XFER_OK)
1423 break;
1424
1425 /* Stop if the target reports that the memory is not available. */
1426 if (res == TARGET_XFER_E_UNAVAILABLE)
1427 break;
1428
1429 /* We want to continue past core files to executables, but not
1430 past a running target's memory. */
1431 if (ops->to_has_all_memory (ops))
1432 break;
1433
1434 ops = ops->beneath;
1435 }
1436 while (ops != NULL);
1437
1438 return res;
1439}
1440
1441/* Perform a partial memory transfer.
1442 For docs see target.h, to_xfer_partial. */
1443
1444static enum target_xfer_status
1445memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1446 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1447 ULONGEST len, ULONGEST *xfered_len)
1448{
1449 enum target_xfer_status res;
1450 int reg_len;
1451 struct mem_region *region;
1452 struct inferior *inf;
1453
1454 /* For accesses to unmapped overlay sections, read directly from
1455 files. Must do this first, as MEMADDR may need adjustment. */
1456 if (readbuf != NULL && overlay_debugging)
1457 {
1458 struct obj_section *section = find_pc_overlay (memaddr);
1459
1460 if (pc_in_unmapped_range (memaddr, section))
1461 {
1462 struct target_section_table *table
1463 = target_get_section_table (ops);
1464 const char *section_name = section->the_bfd_section->name;
1465
1466 memaddr = overlay_mapped_address (memaddr, section);
1467 return section_table_xfer_memory_partial (readbuf, writebuf,
1468 memaddr, len, xfered_len,
1469 table->sections,
1470 table->sections_end,
1471 section_name);
1472 }
1473 }
1474
1475 /* Try the executable files, if "trust-readonly-sections" is set. */
1476 if (readbuf != NULL && trust_readonly)
1477 {
1478 struct target_section *secp;
1479 struct target_section_table *table;
1480
1481 secp = target_section_by_addr (ops, memaddr);
1482 if (secp != NULL
1483 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1484 secp->the_bfd_section)
1485 & SEC_READONLY))
1486 {
1487 table = target_get_section_table (ops);
1488 return section_table_xfer_memory_partial (readbuf, writebuf,
1489 memaddr, len, xfered_len,
1490 table->sections,
1491 table->sections_end,
1492 NULL);
1493 }
1494 }
1495
1496 /* If reading unavailable memory in the context of traceframes, and
 1497 this address falls within a read-only section, fall back to
1498 reading from live memory. */
1499 if (readbuf != NULL && get_traceframe_number () != -1)
1500 {
1501 VEC(mem_range_s) *available;
1502
1503 /* If we fail to get the set of available memory, then the
1504 target does not support querying traceframe info, and so we
1505 attempt reading from the traceframe anyway (assuming the
1506 target implements the old QTro packet then). */
1507 if (traceframe_available_memory (&available, memaddr, len))
1508 {
1509 struct cleanup *old_chain;
1510
1511 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1512
1513 if (VEC_empty (mem_range_s, available)
1514 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1515 {
1516 /* Don't read into the traceframe's available
1517 memory. */
1518 if (!VEC_empty (mem_range_s, available))
1519 {
1520 LONGEST oldlen = len;
1521
1522 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1523 gdb_assert (len <= oldlen);
1524 }
1525
1526 do_cleanups (old_chain);
1527
1528 /* This goes through the topmost target again. */
1529 res = memory_xfer_live_readonly_partial (ops, object,
1530 readbuf, memaddr,
1531 len, xfered_len);
1532 if (res == TARGET_XFER_OK)
1533 return TARGET_XFER_OK;
1534 else
1535 {
 1536 /* No use trying further; we know some memory starting
1537 at MEMADDR isn't available. */
1538 *xfered_len = len;
1539 return TARGET_XFER_E_UNAVAILABLE;
1540 }
1541 }
1542
1543 /* Don't try to read more than how much is available, in
1544 case the target implements the deprecated QTro packet to
1545 cater for older GDBs (the target's knowledge of read-only
1546 sections may be outdated by now). */
1547 len = VEC_index (mem_range_s, available, 0)->length;
1548
1549 do_cleanups (old_chain);
1550 }
1551 }
1552
1553 /* Try GDB's internal data cache. */
1554 region = lookup_mem_region (memaddr);
1555 /* region->hi == 0 means there's no upper bound. */
1556 if (memaddr + len < region->hi || region->hi == 0)
1557 reg_len = len;
1558 else
1559 reg_len = region->hi - memaddr;
1560
1561 switch (region->attrib.mode)
1562 {
1563 case MEM_RO:
1564 if (writebuf != NULL)
1565 return TARGET_XFER_E_IO;
1566 break;
1567
1568 case MEM_WO:
1569 if (readbuf != NULL)
1570 return TARGET_XFER_E_IO;
1571 break;
1572
1573 case MEM_FLASH:
1574 /* We only support writing to flash during "load" for now. */
1575 if (writebuf != NULL)
1576 error (_("Writing to flash memory forbidden in this context"));
1577 break;
1578
1579 case MEM_NONE:
1580 return TARGET_XFER_E_IO;
1581 }
1582
1583 if (!ptid_equal (inferior_ptid, null_ptid))
1584 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1585 else
1586 inf = NULL;
1587
1588 if (inf != NULL
1589 /* The dcache reads whole cache lines; that doesn't play well
1590 with reading from a trace buffer, because reading outside of
1591 the collected memory range fails. */
1592 && get_traceframe_number () == -1
1593 && (region->attrib.cache
1594 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1595 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1596 {
1597 DCACHE *dcache = target_dcache_get_or_init ();
1598 int l;
1599
1600 if (readbuf != NULL)
1601 l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1602 else
1603 /* FIXME drow/2006-08-09: If we're going to preserve const
1604 correctness dcache_xfer_memory should take readbuf and
1605 writebuf. */
1606 l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1607 reg_len, 1);
1608 if (l <= 0)
1609 return TARGET_XFER_E_IO;
1610 else
1611 {
1612 *xfered_len = (ULONGEST) l;
1613 return TARGET_XFER_OK;
1614 }
1615 }
1616
1617 /* If none of those methods found the memory we wanted, fall back
1618 to a target partial transfer. Normally a single call to
1619 to_xfer_partial is enough; if it doesn't recognize an object
1620 it will call the to_xfer_partial of the next target down.
1621 But for memory this won't do. Memory is the only target
1622 object which can be read from more than one valid target.
 1623 A core file, for instance, could have some of the memory but
1624 delegate other bits to the target below it. So, we must
1625 manually try all targets. */
1626
1627 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1628 xfered_len);
1629
 1630 /* If we are writing to the stack, make sure the cache gets updated
 1631 no matter what. Even if this write is not tagged as such, we still
 1632 need to update the cache. */
1633
1634 if (res == TARGET_XFER_OK
1635 && inf != NULL
1636 && writebuf != NULL
1637 && target_dcache_init_p ()
1638 && !region->attrib.cache
1639 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1640 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1641 {
1642 DCACHE *dcache = target_dcache_get ();
1643
1644 dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
1645 }
1646
1647 /* If we still haven't got anything, return the last error. We
1648 give up. */
1649 return res;
1650}
1651
1652/* Perform a partial memory transfer. For docs see target.h,
1653 to_xfer_partial. */
1654
1655static enum target_xfer_status
1656memory_xfer_partial (struct target_ops *ops, enum target_object object,
1657 gdb_byte *readbuf, const gdb_byte *writebuf,
1658 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1659{
1660 enum target_xfer_status res;
1661
1662 /* Zero length requests are ok and require no work. */
1663 if (len == 0)
1664 return TARGET_XFER_EOF;
1665
1666 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1667 breakpoint insns, thus hiding out from higher layers whether
1668 there are software breakpoints inserted in the code stream. */
1669 if (readbuf != NULL)
1670 {
1671 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1672 xfered_len);
1673
1674 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1675 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1676 }
1677 else
1678 {
1679 void *buf;
1680 struct cleanup *old_chain;
1681
1682 /* A large write request is likely to be partially satisfied
1683 by memory_xfer_partial_1. We will continually malloc
1684 and free a copy of the entire write request for breakpoint
1685 shadow handling even though we only end up writing a small
1686 subset of it. Cap writes to 4KB to mitigate this. */
1687 len = min (4096, len);
1688
1689 buf = xmalloc (len);
1690 old_chain = make_cleanup (xfree, buf);
1691 memcpy (buf, writebuf, len);
1692
1693 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1694 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1695 xfered_len);
1696
1697 do_cleanups (old_chain);
1698 }
1699
1700 return res;
1701}
1702
1703static void
1704restore_show_memory_breakpoints (void *arg)
1705{
1706 show_memory_breakpoints = (uintptr_t) arg;
1707}
1708
1709struct cleanup *
1710make_show_memory_breakpoints_cleanup (int show)
1711{
1712 int current = show_memory_breakpoints;
1713
1714 show_memory_breakpoints = show;
1715 return make_cleanup (restore_show_memory_breakpoints,
1716 (void *) (uintptr_t) current);
1717}
1718
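/* Illustrative sketch (editor's addition, not part of target.c): using
   the cleanup above so that a read returns what is actually in target
   memory, including any breakpoint instructions GDB has inserted,
   instead of the breakpoint-shadowed contents.  */

static int
example_read_unshadowed (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int ret = target_read_memory (addr, buf, len);

  do_cleanups (old_chain);
  return ret;
}
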
1719/* For docs see target.h, to_xfer_partial. */
1720
1721enum target_xfer_status
1722target_xfer_partial (struct target_ops *ops,
1723 enum target_object object, const char *annex,
1724 gdb_byte *readbuf, const gdb_byte *writebuf,
1725 ULONGEST offset, ULONGEST len,
1726 ULONGEST *xfered_len)
1727{
1728 enum target_xfer_status retval;
1729
1730 gdb_assert (ops->to_xfer_partial != NULL);
1731
1732 /* Transfer is done when LEN is zero. */
1733 if (len == 0)
1734 return TARGET_XFER_EOF;
1735
1736 if (writebuf && !may_write_memory)
1737 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1738 core_addr_to_string_nz (offset), plongest (len));
1739
1740 *xfered_len = 0;
1741
1742 /* If this is a memory transfer, let the memory-specific code
1743 have a look at it instead. Memory transfers are more
1744 complicated. */
1745 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1746 || object == TARGET_OBJECT_CODE_MEMORY)
1747 retval = memory_xfer_partial (ops, object, readbuf,
1748 writebuf, offset, len, xfered_len);
1749 else if (object == TARGET_OBJECT_RAW_MEMORY)
1750 {
1751 /* Request the normal memory object from other layers. */
1752 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1753 xfered_len);
1754 }
1755 else
1756 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1757 writebuf, offset, len, xfered_len);
1758
1759 if (targetdebug)
1760 {
1761 const unsigned char *myaddr = NULL;
1762
1763 fprintf_unfiltered (gdb_stdlog,
1764 "%s:target_xfer_partial "
1765 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1766 ops->to_shortname,
1767 (int) object,
1768 (annex ? annex : "(null)"),
1769 host_address_to_string (readbuf),
1770 host_address_to_string (writebuf),
1771 core_addr_to_string_nz (offset),
1772 pulongest (len), retval,
1773 pulongest (*xfered_len));
1774
1775 if (readbuf)
1776 myaddr = readbuf;
1777 if (writebuf)
1778 myaddr = writebuf;
1779 if (retval == TARGET_XFER_OK && myaddr != NULL)
1780 {
1781 int i;
1782
1783 fputs_unfiltered (", bytes =", gdb_stdlog);
1784 for (i = 0; i < *xfered_len; i++)
1785 {
1786 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1787 {
1788 if (targetdebug < 2 && i > 0)
1789 {
1790 fprintf_unfiltered (gdb_stdlog, " ...");
1791 break;
1792 }
1793 fprintf_unfiltered (gdb_stdlog, "\n");
1794 }
1795
1796 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1797 }
1798 }
1799
1800 fputc_unfiltered ('\n', gdb_stdlog);
1801 }
1802
 1803 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1804 properly. Do assertion after printing debug messages, so that we
1805 can find more clues on assertion failure from debugging messages. */
1806 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
1807 gdb_assert (*xfered_len > 0);
1808
1809 return retval;
1810}
1811
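/* Illustrative sketch (editor's addition, not part of target.c): a
   single partial read of inferior memory through the target stack.
   "example_xfer_some_memory" is a hypothetical helper; real callers
   must loop, since fewer than LEN bytes may be transferred.  */

static enum target_xfer_status
example_xfer_some_memory (CORE_ADDR addr, gdb_byte *buf, ULONGEST len,
			  ULONGEST *xfered_len)
{
  return target_xfer_partial (current_target.beneath,
			      TARGET_OBJECT_MEMORY, NULL,
			      buf, NULL, addr, len, xfered_len);
}
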
1812/* Read LEN bytes of target memory at address MEMADDR, placing the
1813 results in GDB's memory at MYADDR. Returns either 0 for success or
1814 TARGET_XFER_E_IO if any error occurs.
1815
1816 If an error occurs, no guarantee is made about the contents of the data at
1817 MYADDR. In particular, the caller should not depend upon partial reads
1818 filling the buffer with good data. There is no way for the caller to know
 1819 how much good data might have been transferred anyway. Callers that can
1820 deal with partial reads should call target_read (which will retry until
1821 it makes no progress, and then return how much was transferred). */
1822
1823int
1824target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1825{
1826 /* Dispatch to the topmost target, not the flattened current_target.
1827 Memory accesses check target->to_has_(all_)memory, and the
1828 flattened target doesn't inherit those. */
1829 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1830 myaddr, memaddr, len) == len)
1831 return 0;
1832 else
1833 return TARGET_XFER_E_IO;
1834}
1835
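/* Illustrative sketch (editor's addition, not part of target.c): the
   0 / TARGET_XFER_E_IO return convention in use, reading four bytes
   and decoding them in the current architecture's byte order.  */

static int
example_read_u32 (CORE_ADDR addr, unsigned int *result)
{
  gdb_byte buf[4];
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    return TARGET_XFER_E_IO;

  *result = extract_unsigned_integer (buf, sizeof buf, byte_order);
  return 0;
}
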
1836/* Like target_read_memory, but specify explicitly that this is a read
1837 from the target's raw memory. That is, this read bypasses the
1838 dcache, breakpoint shadowing, etc. */
1839
1840int
1841target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1842{
1843 /* See comment in target_read_memory about why the request starts at
1844 current_target.beneath. */
1845 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1846 myaddr, memaddr, len) == len)
1847 return 0;
1848 else
1849 return TARGET_XFER_E_IO;
1850}
1851
1852/* Like target_read_memory, but specify explicitly that this is a read from
1853 the target's stack. This may trigger different cache behavior. */
1854
1855int
1856target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1857{
1858 /* See comment in target_read_memory about why the request starts at
1859 current_target.beneath. */
1860 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1861 myaddr, memaddr, len) == len)
1862 return 0;
1863 else
1864 return TARGET_XFER_E_IO;
1865}
1866
1867/* Like target_read_memory, but specify explicitly that this is a read from
1868 the target's code. This may trigger different cache behavior. */
1869
1870int
1871target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1872{
1873 /* See comment in target_read_memory about why the request starts at
1874 current_target.beneath. */
1875 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1876 myaddr, memaddr, len) == len)
1877 return 0;
1878 else
1879 return TARGET_XFER_E_IO;
1880}
1881
1882/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1883 Returns either 0 for success or TARGET_XFER_E_IO if any
1884 error occurs. If an error occurs, no guarantee is made about how
1885 much data got written. Callers that can deal with partial writes
1886 should call target_write. */
1887
1888int
1889target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1890{
1891 /* See comment in target_read_memory about why the request starts at
1892 current_target.beneath. */
1893 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1894 myaddr, memaddr, len) == len)
1895 return 0;
1896 else
1897 return TARGET_XFER_E_IO;
1898}
1899
1900/* Write LEN bytes from MYADDR to target raw memory at address
1901 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1902 if any error occurs. If an error occurs, no guarantee is made
1903 about how much data got written. Callers that can deal with
1904 partial writes should call target_write. */
1905
1906int
1907target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1908{
1909 /* See comment in target_read_memory about why the request starts at
1910 current_target.beneath. */
1911 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1912 myaddr, memaddr, len) == len)
1913 return 0;
1914 else
1915 return TARGET_XFER_E_IO;
1916}
1917
1918/* Fetch the target's memory map. */
1919
1920VEC(mem_region_s) *
1921target_memory_map (void)
1922{
1923 VEC(mem_region_s) *result;
1924 struct mem_region *last_one, *this_one;
1925 int ix;
1926 struct target_ops *t;
1927
1928 if (targetdebug)
1929 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1930
1931 for (t = current_target.beneath; t != NULL; t = t->beneath)
1932 if (t->to_memory_map != NULL)
1933 break;
1934
1935 if (t == NULL)
1936 return NULL;
1937
1938 result = t->to_memory_map (t);
1939 if (result == NULL)
1940 return NULL;
1941
1942 qsort (VEC_address (mem_region_s, result),
1943 VEC_length (mem_region_s, result),
1944 sizeof (struct mem_region), mem_region_cmp);
1945
1946 /* Check that regions do not overlap. Simultaneously assign
1947 a numbering for the "mem" commands to use to refer to
1948 each region. */
1949 last_one = NULL;
1950 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1951 {
1952 this_one->number = ix;
1953
1954 if (last_one && last_one->hi > this_one->lo)
1955 {
1956 warning (_("Overlapping regions in memory map: ignoring"));
1957 VEC_free (mem_region_s, result);
1958 return NULL;
1959 }
1960 last_one = this_one;
1961 }
1962
1963 return result;
1964}
1965
1966void
1967target_flash_erase (ULONGEST address, LONGEST length)
1968{
1969 struct target_ops *t;
1970
1971 for (t = current_target.beneath; t != NULL; t = t->beneath)
1972 if (t->to_flash_erase != NULL)
1973 {
1974 if (targetdebug)
1975 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1976 hex_string (address), phex (length, 0));
1977 t->to_flash_erase (t, address, length);
1978 return;
1979 }
1980
1981 tcomplain ();
1982}
1983
1984void
1985target_flash_done (void)
1986{
1987 struct target_ops *t;
1988
1989 for (t = current_target.beneath; t != NULL; t = t->beneath)
1990 if (t->to_flash_done != NULL)
1991 {
1992 if (targetdebug)
1993 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1994 t->to_flash_done (t);
1995 return;
1996 }
1997
1998 tcomplain ();
1999}
2000
2001static void
2002show_trust_readonly (struct ui_file *file, int from_tty,
2003 struct cmd_list_element *c, const char *value)
2004{
2005 fprintf_filtered (file,
2006 _("Mode for reading from readonly sections is %s.\n"),
2007 value);
2008}
2009
2010/* More generic transfers. */
2011
2012static enum target_xfer_status
2013default_xfer_partial (struct target_ops *ops, enum target_object object,
2014 const char *annex, gdb_byte *readbuf,
2015 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2016 ULONGEST *xfered_len)
2017{
2018 if (object == TARGET_OBJECT_MEMORY
2019 && ops->deprecated_xfer_memory != NULL)
2020 /* If available, fall back to the target's
2021 "deprecated_xfer_memory" method. */
2022 {
2023 int xfered = -1;
2024
2025 errno = 0;
2026 if (writebuf != NULL)
2027 {
2028 void *buffer = xmalloc (len);
2029 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2030
2031 memcpy (buffer, writebuf, len);
2032 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2033 1/*write*/, NULL, ops);
2034 do_cleanups (cleanup);
2035 }
2036 if (readbuf != NULL)
2037 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2038 0/*read*/, NULL, ops);
2039 if (xfered > 0)
2040 {
2041 *xfered_len = (ULONGEST) xfered;
2042	      return TARGET_XFER_OK;
2043 }
2044 else if (xfered == 0 && errno == 0)
2045 /* "deprecated_xfer_memory" uses 0, cross checked against
2046 ERRNO as one indication of an error. */
2047 return TARGET_XFER_EOF;
2048 else
2049 return TARGET_XFER_E_IO;
2050 }
2051 else
2052 {
2053 gdb_assert (ops->beneath != NULL);
2054 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2055 readbuf, writebuf, offset, len,
2056 xfered_len);
2057 }
2058}
2059
2060/* Target vector read/write partial wrapper functions. */
2061
2062static enum target_xfer_status
2063target_read_partial (struct target_ops *ops,
2064 enum target_object object,
2065 const char *annex, gdb_byte *buf,
2066 ULONGEST offset, ULONGEST len,
2067 ULONGEST *xfered_len)
2068{
2069 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
2070 xfered_len);
2071}
2072
2073static enum target_xfer_status
2074target_write_partial (struct target_ops *ops,
2075 enum target_object object,
2076 const char *annex, const gdb_byte *buf,
2077 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2078{
2079 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2080 xfered_len);
2081}
2082
2083/* Wrappers to perform the full transfer. */
2084
2085/* For docs on target_read see target.h. */
2086
2087LONGEST
2088target_read (struct target_ops *ops,
2089 enum target_object object,
2090 const char *annex, gdb_byte *buf,
2091 ULONGEST offset, LONGEST len)
2092{
2093 LONGEST xfered = 0;
2094
2095 while (xfered < len)
2096 {
2097 ULONGEST xfered_len;
2098 enum target_xfer_status status;
2099
2100 status = target_read_partial (ops, object, annex,
2101 (gdb_byte *) buf + xfered,
2102 offset + xfered, len - xfered,
2103 &xfered_len);
2104
2105 /* Call an observer, notifying them of the xfer progress? */
2106 if (status == TARGET_XFER_EOF)
2107 return xfered;
2108 else if (status == TARGET_XFER_OK)
2109 {
2110 xfered += xfered_len;
2111 QUIT;
2112 }
2113 else
2114 return -1;
2115
2116 }
2117 return len;
2118}
2119
2120/* Assuming that the entire [begin, end) range of memory cannot be
2121 read, try to read whatever subrange is possible to read.
2122
2123 The function returns, in RESULT, either zero or one memory block.
2124 If there's a readable subrange at the beginning, it is completely
2125 read and returned. Any further readable subrange will not be read.
2126 Otherwise, if there's a readable subrange at the end, it will be
2127 completely read and returned. Any readable subranges before it
2128   (obviously, not starting at the beginning) will be ignored.  In
2129   other cases -- either no readable subrange, or readable subrange(s)
2130   neither at the beginning nor at the end -- nothing is returned.
2131
2132 The purpose of this function is to handle a read across a boundary
2133 of accessible memory in a case when memory map is not available.
2134 The above restrictions are fine for this case, but will give
2135 incorrect results if the memory is 'patchy'. However, supporting
2136 'patchy' memory would require trying to read every single byte,
2137   and that seems an unacceptable solution.  An explicit memory map is
2138   recommended for this case -- and read_memory_robust will
2139 take care of reading multiple ranges then. */
2140
2141static void
2142read_whatever_is_readable (struct target_ops *ops,
2143 ULONGEST begin, ULONGEST end,
2144 VEC(memory_read_result_s) **result)
2145{
2146 gdb_byte *buf = xmalloc (end - begin);
2147 ULONGEST current_begin = begin;
2148 ULONGEST current_end = end;
2149 int forward;
2150 memory_read_result_s r;
2151 ULONGEST xfered_len;
2152
2153 /* If we previously failed to read 1 byte, nothing can be done here. */
2154 if (end - begin <= 1)
2155 {
2156 xfree (buf);
2157 return;
2158 }
2159
2160  /* Check that either the first or the last byte is readable, and give up
2161     if not.  This heuristic is meant to permit reading accessible memory
2162     at the boundary of an accessible region.  */
2163 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2164 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
2165 {
2166 forward = 1;
2167 ++current_begin;
2168 }
2169 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2170 buf + (end-begin) - 1, end - 1, 1,
2171 &xfered_len) == TARGET_XFER_OK)
2172 {
2173 forward = 0;
2174 --current_end;
2175 }
2176 else
2177 {
2178 xfree (buf);
2179 return;
2180 }
2181
2182  /* Loop invariant is that the range [current_begin, current_end) was
2183     previously found to be not readable as a whole.
2184
2185 Note loop condition -- if the range has 1 byte, we can't divide the range
2186 so there's no point trying further. */
2187 while (current_end - current_begin > 1)
2188 {
2189 ULONGEST first_half_begin, first_half_end;
2190 ULONGEST second_half_begin, second_half_end;
2191 LONGEST xfer;
2192 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2193
2194 if (forward)
2195 {
2196 first_half_begin = current_begin;
2197 first_half_end = middle;
2198 second_half_begin = middle;
2199 second_half_end = current_end;
2200 }
2201 else
2202 {
2203 first_half_begin = middle;
2204 first_half_end = current_end;
2205 second_half_begin = current_begin;
2206 second_half_end = middle;
2207 }
2208
2209 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2210 buf + (first_half_begin - begin),
2211 first_half_begin,
2212 first_half_end - first_half_begin);
2213
2214 if (xfer == first_half_end - first_half_begin)
2215 {
2216 /* This half reads up fine. So, the error must be in the
2217 other half. */
2218 current_begin = second_half_begin;
2219 current_end = second_half_end;
2220 }
2221 else
2222 {
2223	  /* This half is not readable.  Because we've tried one byte, we
2224	     know some part of this half is actually readable.  Go to the next
2225	     iteration to divide again and try to read.
2226
2227 We don't handle the other half, because this function only tries
2228 to read a single readable subrange. */
2229 current_begin = first_half_begin;
2230 current_end = first_half_end;
2231 }
2232 }
2233
2234 if (forward)
2235 {
2236 /* The [begin, current_begin) range has been read. */
2237 r.begin = begin;
2238 r.end = current_begin;
2239 r.data = buf;
2240 }
2241 else
2242 {
2243 /* The [current_end, end) range has been read. */
2244 LONGEST rlen = end - current_end;
2245
2246 r.data = xmalloc (rlen);
2247 memcpy (r.data, buf + current_end - begin, rlen);
2248 r.begin = current_end;
2249 r.end = end;
2250 xfree (buf);
2251 }
2252 VEC_safe_push(memory_read_result_s, (*result), &r);
2253}
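
/* Worked illustration of the bisection above (added for clarity, not
   taken from the original): suppose [0x1000, 0x2000) fails to read as
   a whole but the single byte at 0x1000 is readable.  The loop then
   reads the first half of the remaining range; if that succeeds, the
   failure must lie in the second half, which is bisected next, and so
   on until the unreadable boundary is narrowed to within one byte.
   The block pushed onto RESULT is the readable prefix starting at
   0x1000 and ending at that boundary.  */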
2254
2255void
2256free_memory_read_result_vector (void *x)
2257{
2258 VEC(memory_read_result_s) *v = x;
2259 memory_read_result_s *current;
2260 int ix;
2261
2262 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2263 {
2264 xfree (current->data);
2265 }
2266 VEC_free (memory_read_result_s, v);
2267}
2268
2269VEC(memory_read_result_s) *
2270read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2271{
2272 VEC(memory_read_result_s) *result = 0;
2273
2274 LONGEST xfered = 0;
2275 while (xfered < len)
2276 {
2277 struct mem_region *region = lookup_mem_region (offset + xfered);
2278 LONGEST rlen;
2279
2280 /* If there is no explicit region, a fake one should be created. */
2281 gdb_assert (region);
2282
2283 if (region->hi == 0)
2284 rlen = len - xfered;
2285 else
2286 rlen = region->hi - offset;
2287
2288 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2289 {
2290 /* Cannot read this region. Note that we can end up here only
2291 if the region is explicitly marked inaccessible, or
2292 'inaccessible-by-default' is in effect. */
2293 xfered += rlen;
2294 }
2295 else
2296 {
2297 LONGEST to_read = min (len - xfered, rlen);
2298 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2299
2300 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2301 (gdb_byte *) buffer,
2302 offset + xfered, to_read);
2303 /* Call an observer, notifying them of the xfer progress? */
2304 if (xfer <= 0)
2305 {
2306 /* Got an error reading full chunk. See if maybe we can read
2307 some subrange. */
2308 xfree (buffer);
2309 read_whatever_is_readable (ops, offset + xfered,
2310 offset + xfered + to_read, &result);
2311 xfered += to_read;
2312 }
2313 else
2314 {
2315 struct memory_read_result r;
2316 r.data = buffer;
2317 r.begin = offset + xfered;
2318 r.end = r.begin + xfer;
2319 VEC_safe_push (memory_read_result_s, result, &r);
2320 xfered += xfer;
2321 }
2322 QUIT;
2323 }
2324 }
2325 return result;
2326}
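
/* Usage sketch (illustrative only; ADDR, LEN and the consumer are
   placeholders): collect whatever is readable in a range and release
   the result with a cleanup.

       VEC(memory_read_result_s) *available
	 = read_memory_robust (current_target.beneath, addr, len);
       struct cleanup *old_chain
	 = make_cleanup (free_memory_read_result_vector, available);
       memory_read_result_s *r;
       int ix;

       for (ix = 0;
	    VEC_iterate (memory_read_result_s, available, ix, r);
	    ix++)
	 consume_bytes (r->begin, r->data, r->end - r->begin);

       do_cleanups (old_chain);

   consume_bytes stands in for whatever the caller does with each
   readable block; the VEC and cleanup calls mirror those used by
   free_memory_read_result_vector above.  */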
2327
2328
2329/* An alternative to target_write with progress callbacks. */
2330
2331LONGEST
2332target_write_with_progress (struct target_ops *ops,
2333 enum target_object object,
2334 const char *annex, const gdb_byte *buf,
2335 ULONGEST offset, LONGEST len,
2336 void (*progress) (ULONGEST, void *), void *baton)
2337{
2338 LONGEST xfered = 0;
2339
2340 /* Give the progress callback a chance to set up. */
2341 if (progress)
2342 (*progress) (0, baton);
2343
2344 while (xfered < len)
2345 {
2346 ULONGEST xfered_len;
2347 enum target_xfer_status status;
2348
2349 status = target_write_partial (ops, object, annex,
2350 (gdb_byte *) buf + xfered,
2351 offset + xfered, len - xfered,
2352 &xfered_len);
2353
2354 if (status == TARGET_XFER_EOF)
2355 return xfered;
2356 if (TARGET_XFER_STATUS_ERROR_P (status))
2357 return -1;
2358
2359 gdb_assert (status == TARGET_XFER_OK);
2360 if (progress)
2361 (*progress) (xfered_len, baton);
2362
2363 xfered += xfered_len;
2364 QUIT;
2365 }
2366 return len;
2367}
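
/* Illustrative sketch (not from the original): a minimal progress
   callback suitable for target_write_with_progress.  The initial call
   reports 0 before any data has moved; later calls report how many
   bytes each successful chunk transferred.

       static void
       count_progress (ULONGEST written, void *baton)
       {
	 ULONGEST *total = baton;

	 *total += written;
       }

   A caller would pass count_progress and the address of a ULONGEST
   accumulator as the last two arguments of
   target_write_with_progress.  */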
2368
2369/* For docs on target_write see target.h. */
2370
2371LONGEST
2372target_write (struct target_ops *ops,
2373 enum target_object object,
2374 const char *annex, const gdb_byte *buf,
2375 ULONGEST offset, LONGEST len)
2376{
2377 return target_write_with_progress (ops, object, annex, buf, offset, len,
2378 NULL, NULL);
2379}
2380
2381/* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2382 the size of the transferred data. PADDING additional bytes are
2383 available in *BUF_P. This is a helper function for
2384 target_read_alloc; see the declaration of that function for more
2385 information. */
2386
2387static LONGEST
2388target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2389 const char *annex, gdb_byte **buf_p, int padding)
2390{
2391 size_t buf_alloc, buf_pos;
2392 gdb_byte *buf;
2393
2394 /* This function does not have a length parameter; it reads the
2395     entire OBJECT.  Also, it doesn't support objects fetched partly
2396 from one target and partly from another (in a different stratum,
2397 e.g. a core file and an executable). Both reasons make it
2398 unsuitable for reading memory. */
2399 gdb_assert (object != TARGET_OBJECT_MEMORY);
2400
2401 /* Start by reading up to 4K at a time. The target will throttle
2402 this number down if necessary. */
2403 buf_alloc = 4096;
2404 buf = xmalloc (buf_alloc);
2405 buf_pos = 0;
2406 while (1)
2407 {
2408 ULONGEST xfered_len;
2409 enum target_xfer_status status;
2410
2411 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2412 buf_pos, buf_alloc - buf_pos - padding,
2413 &xfered_len);
2414
2415 if (status == TARGET_XFER_EOF)
2416 {
2417 /* Read all there was. */
2418 if (buf_pos == 0)
2419 xfree (buf);
2420 else
2421 *buf_p = buf;
2422 return buf_pos;
2423 }
2424 else if (status != TARGET_XFER_OK)
2425 {
2426 /* An error occurred. */
2427 xfree (buf);
2428 return TARGET_XFER_E_IO;
2429 }
2430
2431 buf_pos += xfered_len;
2432
2433 /* If the buffer is filling up, expand it. */
2434 if (buf_alloc < buf_pos * 2)
2435 {
2436 buf_alloc *= 2;
2437 buf = xrealloc (buf, buf_alloc);
2438 }
2439
2440 QUIT;
2441 }
2442}
2443
2444/* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2445   the size of the transferred data.  See the declaration in "target.h"
2446   for more information about the return value.  */
2447
2448LONGEST
2449target_read_alloc (struct target_ops *ops, enum target_object object,
2450 const char *annex, gdb_byte **buf_p)
2451{
2452 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2453}
2454
2455/* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2456 returned as a string, allocated using xmalloc. If an error occurs
2457 or the transfer is unsupported, NULL is returned. Empty objects
2458 are returned as allocated but empty strings. A warning is issued
2459 if the result contains any embedded NUL bytes. */
2460
2461char *
2462target_read_stralloc (struct target_ops *ops, enum target_object object,
2463 const char *annex)
2464{
2465 gdb_byte *buffer;
2466 char *bufstr;
2467 LONGEST i, transferred;
2468
2469 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2470 bufstr = (char *) buffer;
2471
2472 if (transferred < 0)
2473 return NULL;
2474
2475 if (transferred == 0)
2476 return xstrdup ("");
2477
2478 bufstr[transferred] = 0;
2479
2480 /* Check for embedded NUL bytes; but allow trailing NULs. */
2481 for (i = strlen (bufstr); i < transferred; i++)
2482 if (bufstr[i] != 0)
2483 {
2484 warning (_("target object %d, annex %s, "
2485 "contained unexpected null characters"),
2486 (int) object, annex ? annex : "(none)");
2487 break;
2488 }
2489
2490 return bufstr;
2491}
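
/* Usage sketch (illustrative; "processes" is just an example annex,
   and OPS stands for a target such as current_target.beneath):

       char *data = target_read_stralloc (ops, TARGET_OBJECT_OSDATA,
					  "processes");

       if (data != NULL)
	 {
	   use_osdata_string (data);
	   xfree (data);
	 }

   use_osdata_string is a placeholder for the caller's own processing;
   target_get_osdata below wraps exactly this kind of call.  */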
2492
2493/* Memory transfer methods. */
2494
2495void
2496get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2497 LONGEST len)
2498{
2499 /* This method is used to read from an alternate, non-current
2500 target. This read must bypass the overlay support (as symbols
2501 don't match this target), and GDB's internal cache (wrong cache
2502 for this target). */
2503 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2504 != len)
2505 memory_error (TARGET_XFER_E_IO, addr);
2506}
2507
2508ULONGEST
2509get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2510 int len, enum bfd_endian byte_order)
2511{
2512 gdb_byte buf[sizeof (ULONGEST)];
2513
2514 gdb_assert (len <= sizeof (buf));
2515 get_target_memory (ops, addr, buf, len);
2516 return extract_unsigned_integer (buf, len, byte_order);
2517}
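
/* Illustrative sketch: reading a pointer-sized unsigned value from an
   alternate target OPS at a made-up address ADDR, bypassing caches and
   overlays as described above.

       struct gdbarch *gdbarch = target_gdbarch ();
       ULONGEST value
	 = get_target_memory_unsigned (ops, addr,
				       gdbarch_ptr_bit (gdbarch)
				       / TARGET_CHAR_BIT,
				       gdbarch_byte_order (gdbarch));

   The ptr_bit / TARGET_CHAR_BIT computation is the same one used by
   default_region_ok_for_hw_watchpoint later in this file.  */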
2518
2519/* See target.h. */
2520
2521int
2522target_insert_breakpoint (struct gdbarch *gdbarch,
2523 struct bp_target_info *bp_tgt)
2524{
2525 if (!may_insert_breakpoints)
2526 {
2527 warning (_("May not insert breakpoints"));
2528 return 1;
2529 }
2530
2531 return current_target.to_insert_breakpoint (&current_target,
2532 gdbarch, bp_tgt);
2533}
2534
2535/* See target.h. */
2536
2537int
2538target_remove_breakpoint (struct gdbarch *gdbarch,
2539 struct bp_target_info *bp_tgt)
2540{
2541 /* This is kind of a weird case to handle, but the permission might
2542 have been changed after breakpoints were inserted - in which case
2543 we should just take the user literally and assume that any
2544 breakpoints should be left in place. */
2545 if (!may_insert_breakpoints)
2546 {
2547 warning (_("May not remove breakpoints"));
2548 return 1;
2549 }
2550
2551 return current_target.to_remove_breakpoint (&current_target,
2552 gdbarch, bp_tgt);
2553}
2554
2555static void
2556target_info (char *args, int from_tty)
2557{
2558 struct target_ops *t;
2559 int has_all_mem = 0;
2560
2561 if (symfile_objfile != NULL)
2562 printf_unfiltered (_("Symbols from \"%s\".\n"),
2563 objfile_name (symfile_objfile));
2564
2565 for (t = target_stack; t != NULL; t = t->beneath)
2566 {
2567 if (!(*t->to_has_memory) (t))
2568 continue;
2569
2570 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2571 continue;
2572 if (has_all_mem)
2573 printf_unfiltered (_("\tWhile running this, "
2574 "GDB does not access memory from...\n"));
2575 printf_unfiltered ("%s:\n", t->to_longname);
2576 (t->to_files_info) (t);
2577 has_all_mem = (*t->to_has_all_memory) (t);
2578 }
2579}
2580
2581/* This function is called before any new inferior is created, e.g.
2582 by running a program, attaching, or connecting to a target.
2583 It cleans up any state from previous invocations which might
2584 change between runs. This is a subset of what target_preopen
2585 resets (things which might change between targets). */
2586
2587void
2588target_pre_inferior (int from_tty)
2589{
2590 /* Clear out solib state. Otherwise the solib state of the previous
2591 inferior might have survived and is entirely wrong for the new
2592 target. This has been observed on GNU/Linux using glibc 2.3. How
2593 to reproduce:
2594
2595 bash$ ./foo&
2596 [1] 4711
2597 bash$ ./foo&
2598     [2] 4712
2599 bash$ gdb ./foo
2600 [...]
2601 (gdb) attach 4711
2602 (gdb) detach
2603 (gdb) attach 4712
2604 Cannot access memory at address 0xdeadbeef
2605 */
2606
2607 /* In some OSs, the shared library list is the same/global/shared
2608 across inferiors. If code is shared between processes, so are
2609 memory regions and features. */
2610 if (!gdbarch_has_global_solist (target_gdbarch ()))
2611 {
2612 no_shared_libraries (NULL, from_tty);
2613
2614 invalidate_target_mem_regions ();
2615
2616 target_clear_description ();
2617 }
2618
2619 agent_capability_invalidate ();
2620}
2621
2622/* Callback for iterate_over_inferiors. Gets rid of the given
2623 inferior. */
2624
2625static int
2626dispose_inferior (struct inferior *inf, void *args)
2627{
2628 struct thread_info *thread;
2629
2630 thread = any_thread_of_process (inf->pid);
2631 if (thread)
2632 {
2633 switch_to_thread (thread->ptid);
2634
2635 /* Core inferiors actually should be detached, not killed. */
2636 if (target_has_execution)
2637 target_kill ();
2638 else
2639 target_detach (NULL, 0);
2640 }
2641
2642 return 0;
2643}
2644
2645/* This is to be called by the open routine before it does
2646 anything. */
2647
2648void
2649target_preopen (int from_tty)
2650{
2651 dont_repeat ();
2652
2653 if (have_inferiors ())
2654 {
2655 if (!from_tty
2656 || !have_live_inferiors ()
2657 || query (_("A program is being debugged already. Kill it? ")))
2658 iterate_over_inferiors (dispose_inferior, NULL);
2659 else
2660 error (_("Program not killed."));
2661 }
2662
2663 /* Calling target_kill may remove the target from the stack. But if
2664 it doesn't (which seems like a win for UDI), remove it now. */
2665 /* Leave the exec target, though. The user may be switching from a
2666 live process to a core of the same program. */
2667 pop_all_targets_above (file_stratum);
2668
2669 target_pre_inferior (from_tty);
2670}
2671
2672/* Detach a target after doing deferred register stores. */
2673
2674void
2675target_detach (const char *args, int from_tty)
2676{
2677 struct target_ops* t;
2678
2679 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2680 /* Don't remove global breakpoints here. They're removed on
2681 disconnection from the target. */
2682 ;
2683 else
2684 /* If we're in breakpoints-always-inserted mode, have to remove
2685 them before detaching. */
2686 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2687
2688 prepare_for_detach ();
2689
2690 for (t = current_target.beneath; t != NULL; t = t->beneath)
2691 {
2692 if (t->to_detach != NULL)
2693 {
2694 t->to_detach (t, args, from_tty);
2695 if (targetdebug)
2696 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2697 args, from_tty);
2698 return;
2699 }
2700 }
2701
2702 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2703}
2704
2705void
2706target_disconnect (char *args, int from_tty)
2707{
2708 struct target_ops *t;
2709
2710 /* If we're in breakpoints-always-inserted mode or if breakpoints
2711 are global across processes, we have to remove them before
2712 disconnecting. */
2713 remove_breakpoints ();
2714
2715 for (t = current_target.beneath; t != NULL; t = t->beneath)
2716 if (t->to_disconnect != NULL)
2717 {
2718 if (targetdebug)
2719 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2720 args, from_tty);
2721 t->to_disconnect (t, args, from_tty);
2722 return;
2723 }
2724
2725 tcomplain ();
2726}
2727
2728ptid_t
2729target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2730{
2731 struct target_ops *t;
2732 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2733 status, options);
2734
2735 if (targetdebug)
2736 {
2737 char *status_string;
2738 char *options_string;
2739
2740 status_string = target_waitstatus_to_string (status);
2741 options_string = target_options_to_string (options);
2742 fprintf_unfiltered (gdb_stdlog,
2743 "target_wait (%d, status, options={%s})"
2744 " = %d, %s\n",
2745 ptid_get_pid (ptid), options_string,
2746 ptid_get_pid (retval), status_string);
2747 xfree (status_string);
2748 xfree (options_string);
2749 }
2750
2751 return retval;
2752}
2753
2754char *
2755target_pid_to_str (ptid_t ptid)
2756{
2757 struct target_ops *t;
2758
2759 for (t = current_target.beneath; t != NULL; t = t->beneath)
2760 {
2761 if (t->to_pid_to_str != NULL)
2762 return (*t->to_pid_to_str) (t, ptid);
2763 }
2764
2765 return normal_pid_to_str (ptid);
2766}
2767
2768char *
2769target_thread_name (struct thread_info *info)
2770{
2771 struct target_ops *t;
2772
2773 for (t = current_target.beneath; t != NULL; t = t->beneath)
2774 {
2775 if (t->to_thread_name != NULL)
2776 return (*t->to_thread_name) (t, info);
2777 }
2778
2779 return NULL;
2780}
2781
2782void
2783target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2784{
2785 struct target_ops *t;
2786
2787 target_dcache_invalidate ();
2788
2789 current_target.to_resume (&current_target, ptid, step, signal);
2790 if (targetdebug)
2791 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2792 ptid_get_pid (ptid),
2793 step ? "step" : "continue",
2794 gdb_signal_to_name (signal));
2795
2796 registers_changed_ptid (ptid);
2797 set_executing (ptid, 1);
2798 set_running (ptid, 1);
2799 clear_inline_frame_state (ptid);
2800}
2801
2802void
2803target_pass_signals (int numsigs, unsigned char *pass_signals)
2804{
2805 struct target_ops *t;
2806
2807 for (t = current_target.beneath; t != NULL; t = t->beneath)
2808 {
2809 if (t->to_pass_signals != NULL)
2810 {
2811 if (targetdebug)
2812 {
2813 int i;
2814
2815 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2816 numsigs);
2817
2818 for (i = 0; i < numsigs; i++)
2819 if (pass_signals[i])
2820 fprintf_unfiltered (gdb_stdlog, " %s",
2821 gdb_signal_to_name (i));
2822
2823 fprintf_unfiltered (gdb_stdlog, " })\n");
2824 }
2825
2826 (*t->to_pass_signals) (t, numsigs, pass_signals);
2827 return;
2828 }
2829 }
2830}
2831
2832void
2833target_program_signals (int numsigs, unsigned char *program_signals)
2834{
2835 struct target_ops *t;
2836
2837 for (t = current_target.beneath; t != NULL; t = t->beneath)
2838 {
2839 if (t->to_program_signals != NULL)
2840 {
2841 if (targetdebug)
2842 {
2843 int i;
2844
2845 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2846 numsigs);
2847
2848 for (i = 0; i < numsigs; i++)
2849 if (program_signals[i])
2850 fprintf_unfiltered (gdb_stdlog, " %s",
2851 gdb_signal_to_name (i));
2852
2853 fprintf_unfiltered (gdb_stdlog, " })\n");
2854 }
2855
2856 (*t->to_program_signals) (t, numsigs, program_signals);
2857 return;
2858 }
2859 }
2860}
2861
2862/* Look through the list of possible targets for a target that can
2863 follow forks. */
2864
2865int
2866target_follow_fork (int follow_child, int detach_fork)
2867{
2868 struct target_ops *t;
2869
2870 for (t = current_target.beneath; t != NULL; t = t->beneath)
2871 {
2872 if (t->to_follow_fork != NULL)
2873 {
2874 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2875
2876 if (targetdebug)
2877 fprintf_unfiltered (gdb_stdlog,
2878 "target_follow_fork (%d, %d) = %d\n",
2879 follow_child, detach_fork, retval);
2880 return retval;
2881 }
2882 }
2883
2884 /* Some target returned a fork event, but did not know how to follow it. */
2885 internal_error (__FILE__, __LINE__,
2886 _("could not find a target to follow fork"));
2887}
2888
2889void
2890target_mourn_inferior (void)
2891{
2892 struct target_ops *t;
2893
2894 for (t = current_target.beneath; t != NULL; t = t->beneath)
2895 {
2896 if (t->to_mourn_inferior != NULL)
2897 {
2898 t->to_mourn_inferior (t);
2899 if (targetdebug)
2900 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2901
2902 /* We no longer need to keep handles on any of the object files.
2903 Make sure to release them to avoid unnecessarily locking any
2904 of them while we're not actually debugging. */
2905 bfd_cache_close_all ();
2906
2907 return;
2908 }
2909 }
2910
2911 internal_error (__FILE__, __LINE__,
2912		  _("could not find a target to mourn the inferior"));
2913}
2914
2915/* Look for a target which can describe architectural features, starting
2916 from TARGET. If we find one, return its description. */
2917
2918const struct target_desc *
2919target_read_description (struct target_ops *target)
2920{
2921 struct target_ops *t;
2922
2923 for (t = target; t != NULL; t = t->beneath)
2924 if (t->to_read_description != NULL)
2925 {
2926 const struct target_desc *tdesc;
2927
2928 tdesc = t->to_read_description (t);
2929 if (tdesc)
2930 return tdesc;
2931 }
2932
2933 return NULL;
2934}
2935
2936/* The default implementation of to_search_memory.
2937 This implements a basic search of memory, reading target memory and
2938   performing the search here (as opposed to performing the search on the
2939 target side with, for example, gdbserver). */
2940
2941int
2942simple_search_memory (struct target_ops *ops,
2943 CORE_ADDR start_addr, ULONGEST search_space_len,
2944 const gdb_byte *pattern, ULONGEST pattern_len,
2945 CORE_ADDR *found_addrp)
2946{
2947 /* NOTE: also defined in find.c testcase. */
2948#define SEARCH_CHUNK_SIZE 16000
2949 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2950 /* Buffer to hold memory contents for searching. */
2951 gdb_byte *search_buf;
2952 unsigned search_buf_size;
2953 struct cleanup *old_cleanups;
2954
2955 search_buf_size = chunk_size + pattern_len - 1;
2956
2957 /* No point in trying to allocate a buffer larger than the search space. */
2958 if (search_space_len < search_buf_size)
2959 search_buf_size = search_space_len;
2960
2961 search_buf = malloc (search_buf_size);
2962 if (search_buf == NULL)
2963 error (_("Unable to allocate memory to perform the search."));
2964 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2965
2966 /* Prime the search buffer. */
2967
2968 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2969 search_buf, start_addr, search_buf_size) != search_buf_size)
2970 {
2971 warning (_("Unable to access %s bytes of target "
2972 "memory at %s, halting search."),
2973 pulongest (search_buf_size), hex_string (start_addr));
2974 do_cleanups (old_cleanups);
2975 return -1;
2976 }
2977
2978 /* Perform the search.
2979
2980 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2981 When we've scanned N bytes we copy the trailing bytes to the start and
2982 read in another N bytes. */
2983
2984 while (search_space_len >= pattern_len)
2985 {
2986 gdb_byte *found_ptr;
2987 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2988
2989 found_ptr = memmem (search_buf, nr_search_bytes,
2990 pattern, pattern_len);
2991
2992 if (found_ptr != NULL)
2993 {
2994 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2995
2996 *found_addrp = found_addr;
2997 do_cleanups (old_cleanups);
2998 return 1;
2999 }
3000
3001 /* Not found in this chunk, skip to next chunk. */
3002
3003 /* Don't let search_space_len wrap here, it's unsigned. */
3004 if (search_space_len >= chunk_size)
3005 search_space_len -= chunk_size;
3006 else
3007 search_space_len = 0;
3008
3009 if (search_space_len >= pattern_len)
3010 {
3011 unsigned keep_len = search_buf_size - chunk_size;
3012 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
3013 int nr_to_read;
3014
3015 /* Copy the trailing part of the previous iteration to the front
3016 of the buffer for the next iteration. */
3017 gdb_assert (keep_len == pattern_len - 1);
3018 memcpy (search_buf, search_buf + chunk_size, keep_len);
3019
3020 nr_to_read = min (search_space_len - keep_len, chunk_size);
3021
3022 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3023 search_buf + keep_len, read_addr,
3024 nr_to_read) != nr_to_read)
3025 {
3026 warning (_("Unable to access %s bytes of target "
3027 "memory at %s, halting search."),
3028 plongest (nr_to_read),
3029 hex_string (read_addr));
3030 do_cleanups (old_cleanups);
3031 return -1;
3032 }
3033
3034 start_addr += chunk_size;
3035 }
3036 }
3037
3038 /* Not found. */
3039
3040 do_cleanups (old_cleanups);
3041 return 0;
3042}
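
/* Worked example of the chunking above (added for clarity): with
   SEARCH_CHUNK_SIZE of 16000 and a 5-byte pattern, search_buf holds
   16000 + 5 - 1 = 16004 bytes.  After one chunk is scanned, the last
   keep_len = 4 bytes are copied to the front of the buffer so that a
   match straddling two chunks is still found, and the next read fills
   the remaining 16000 bytes starting at start_addr + 16004.  */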
3043
3044/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3045 sequence of bytes in PATTERN with length PATTERN_LEN.
3046
3047 The result is 1 if found, 0 if not found, and -1 if there was an error
3048 requiring halting of the search (e.g. memory read error).
3049 If the pattern is found the address is recorded in FOUND_ADDRP. */
3050
3051int
3052target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3053 const gdb_byte *pattern, ULONGEST pattern_len,
3054 CORE_ADDR *found_addrp)
3055{
3056 struct target_ops *t;
3057 int found;
3058
3059 /* We don't use INHERIT to set current_target.to_search_memory,
3060 so we have to scan the target stack and handle targetdebug
3061 ourselves. */
3062
3063 if (targetdebug)
3064 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3065 hex_string (start_addr));
3066
3067 for (t = current_target.beneath; t != NULL; t = t->beneath)
3068 if (t->to_search_memory != NULL)
3069 break;
3070
3071 if (t != NULL)
3072 {
3073 found = t->to_search_memory (t, start_addr, search_space_len,
3074 pattern, pattern_len, found_addrp);
3075 }
3076 else
3077 {
3078 /* If a special version of to_search_memory isn't available, use the
3079 simple version. */
3080 found = simple_search_memory (current_target.beneath,
3081 start_addr, search_space_len,
3082 pattern, pattern_len, found_addrp);
3083 }
3084
3085 if (targetdebug)
3086 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3087
3088 return found;
3089}
3090
3091/* Look through the currently pushed targets. If none of them will
3092 be able to restart the currently running process, issue an error
3093 message. */
3094
3095void
3096target_require_runnable (void)
3097{
3098 struct target_ops *t;
3099
3100 for (t = target_stack; t != NULL; t = t->beneath)
3101 {
3102 /* If this target knows how to create a new program, then
3103 assume we will still be able to after killing the current
3104 one. Either killing and mourning will not pop T, or else
3105 find_default_run_target will find it again. */
3106 if (t->to_create_inferior != NULL)
3107 return;
3108
3109      /* Do not worry about thread_stratum targets that cannot
3110 create inferiors. Assume they will be pushed again if
3111 necessary, and continue to the process_stratum. */
3112 if (t->to_stratum == thread_stratum
3113 || t->to_stratum == arch_stratum)
3114 continue;
3115
3116 error (_("The \"%s\" target does not support \"run\". "
3117 "Try \"help target\" or \"continue\"."),
3118 t->to_shortname);
3119 }
3120
3121 /* This function is only called if the target is running. In that
3122 case there should have been a process_stratum target and it
3123 should either know how to create inferiors, or not... */
3124 internal_error (__FILE__, __LINE__, _("No targets found"));
3125}
3126
3127/* Look through the list of possible targets for a target that can
3128 execute a run or attach command without any other data. This is
3129 used to locate the default process stratum.
3130
3131 If DO_MESG is not NULL, the result is always valid (error() is
3132 called for errors); else, return NULL on error. */
3133
3134static struct target_ops *
3135find_default_run_target (char *do_mesg)
3136{
3137 struct target_ops **t;
3138 struct target_ops *runable = NULL;
3139 int count;
3140
3141 count = 0;
3142
3143 for (t = target_structs; t < target_structs + target_struct_size;
3144 ++t)
3145 {
3146 if ((*t)->to_can_run && target_can_run (*t))
3147 {
3148 runable = *t;
3149 ++count;
3150 }
3151 }
3152
3153 if (count != 1)
3154 {
3155 if (do_mesg)
3156 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3157 else
3158 return NULL;
3159 }
3160
3161 return runable;
3162}
3163
3164void
3165find_default_attach (struct target_ops *ops, char *args, int from_tty)
3166{
3167 struct target_ops *t;
3168
3169 t = find_default_run_target ("attach");
3170 (t->to_attach) (t, args, from_tty);
3171 return;
3172}
3173
3174void
3175find_default_create_inferior (struct target_ops *ops,
3176 char *exec_file, char *allargs, char **env,
3177 int from_tty)
3178{
3179 struct target_ops *t;
3180
3181 t = find_default_run_target ("run");
3182 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3183 return;
3184}
3185
3186static int
3187find_default_can_async_p (struct target_ops *ignore)
3188{
3189 struct target_ops *t;
3190
3191 /* This may be called before the target is pushed on the stack;
3192 look for the default process stratum. If there's none, gdb isn't
3193 configured with a native debugger, and target remote isn't
3194 connected yet. */
3195 t = find_default_run_target (NULL);
3196 if (t && t->to_can_async_p != delegate_can_async_p)
3197 return (t->to_can_async_p) (t);
3198 return 0;
3199}
3200
3201static int
3202find_default_is_async_p (struct target_ops *ignore)
3203{
3204 struct target_ops *t;
3205
3206 /* This may be called before the target is pushed on the stack;
3207 look for the default process stratum. If there's none, gdb isn't
3208 configured with a native debugger, and target remote isn't
3209 connected yet. */
3210 t = find_default_run_target (NULL);
3211 if (t && t->to_is_async_p != delegate_is_async_p)
3212 return (t->to_is_async_p) (t);
3213 return 0;
3214}
3215
3216static int
3217find_default_supports_non_stop (struct target_ops *self)
3218{
3219 struct target_ops *t;
3220
3221 t = find_default_run_target (NULL);
3222 if (t && t->to_supports_non_stop)
3223 return (t->to_supports_non_stop) (t);
3224 return 0;
3225}
3226
3227int
3228target_supports_non_stop (void)
3229{
3230 struct target_ops *t;
3231
3232 for (t = &current_target; t != NULL; t = t->beneath)
3233 if (t->to_supports_non_stop)
3234 return t->to_supports_non_stop (t);
3235
3236 return 0;
3237}
3238
3239/* Implement the "info proc" command. */
3240
3241int
3242target_info_proc (char *args, enum info_proc_what what)
3243{
3244 struct target_ops *t;
3245
3246 /* If we're already connected to something that can get us OS
3247 related data, use it. Otherwise, try using the native
3248 target. */
3249 if (current_target.to_stratum >= process_stratum)
3250 t = current_target.beneath;
3251 else
3252 t = find_default_run_target (NULL);
3253
3254 for (; t != NULL; t = t->beneath)
3255 {
3256 if (t->to_info_proc != NULL)
3257 {
3258 t->to_info_proc (t, args, what);
3259
3260 if (targetdebug)
3261 fprintf_unfiltered (gdb_stdlog,
3262 "target_info_proc (\"%s\", %d)\n", args, what);
3263
3264 return 1;
3265 }
3266 }
3267
3268 return 0;
3269}
3270
3271static int
3272find_default_supports_disable_randomization (struct target_ops *self)
3273{
3274 struct target_ops *t;
3275
3276 t = find_default_run_target (NULL);
3277 if (t && t->to_supports_disable_randomization)
3278 return (t->to_supports_disable_randomization) (t);
3279 return 0;
3280}
3281
3282int
3283target_supports_disable_randomization (void)
3284{
3285 struct target_ops *t;
3286
3287 for (t = &current_target; t != NULL; t = t->beneath)
3288 if (t->to_supports_disable_randomization)
3289 return t->to_supports_disable_randomization (t);
3290
3291 return 0;
3292}
3293
3294char *
3295target_get_osdata (const char *type)
3296{
3297 struct target_ops *t;
3298
3299 /* If we're already connected to something that can get us OS
3300 related data, use it. Otherwise, try using the native
3301 target. */
3302 if (current_target.to_stratum >= process_stratum)
3303 t = current_target.beneath;
3304 else
3305 t = find_default_run_target ("get OS data");
3306
3307 if (!t)
3308 return NULL;
3309
3310 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3311}
3312
3313/* Determine the current address space of thread PTID. */
3314
3315struct address_space *
3316target_thread_address_space (ptid_t ptid)
3317{
3318 struct address_space *aspace;
3319 struct inferior *inf;
3320 struct target_ops *t;
3321
3322 for (t = current_target.beneath; t != NULL; t = t->beneath)
3323 {
3324 if (t->to_thread_address_space != NULL)
3325 {
3326 aspace = t->to_thread_address_space (t, ptid);
3327 gdb_assert (aspace);
3328
3329 if (targetdebug)
3330 fprintf_unfiltered (gdb_stdlog,
3331 "target_thread_address_space (%s) = %d\n",
3332 target_pid_to_str (ptid),
3333 address_space_num (aspace));
3334 return aspace;
3335 }
3336 }
3337
3338 /* Fall-back to the "main" address space of the inferior. */
3339 inf = find_inferior_pid (ptid_get_pid (ptid));
3340
3341 if (inf == NULL || inf->aspace == NULL)
3342 internal_error (__FILE__, __LINE__,
3343 _("Can't determine the current "
3344 "address space of thread %s\n"),
3345 target_pid_to_str (ptid));
3346
3347 return inf->aspace;
3348}
3349
3350
3351/* Target file operations. */
3352
3353static struct target_ops *
3354default_fileio_target (void)
3355{
3356 /* If we're already connected to something that can perform
3357 file I/O, use it. Otherwise, try using the native target. */
3358 if (current_target.to_stratum >= process_stratum)
3359 return current_target.beneath;
3360 else
3361 return find_default_run_target ("file I/O");
3362}
3363
3364/* Open FILENAME on the target, using FLAGS and MODE. Return a
3365 target file descriptor, or -1 if an error occurs (and set
3366 *TARGET_ERRNO). */
3367int
3368target_fileio_open (const char *filename, int flags, int mode,
3369 int *target_errno)
3370{
3371 struct target_ops *t;
3372
3373 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3374 {
3375 if (t->to_fileio_open != NULL)
3376 {
3377 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3378
3379 if (targetdebug)
3380 fprintf_unfiltered (gdb_stdlog,
3381 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3382 filename, flags, mode,
3383 fd, fd != -1 ? 0 : *target_errno);
3384 return fd;
3385 }
3386 }
3387
3388 *target_errno = FILEIO_ENOSYS;
3389 return -1;
3390}
3391
3392/* Write up to LEN bytes from WRITE_BUF to FD on the target.
3393 Return the number of bytes written, or -1 if an error occurs
3394 (and set *TARGET_ERRNO). */
3395int
3396target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3397 ULONGEST offset, int *target_errno)
3398{
3399 struct target_ops *t;
3400
3401 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3402 {
3403 if (t->to_fileio_pwrite != NULL)
3404 {
3405 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3406 target_errno);
3407
3408 if (targetdebug)
3409 fprintf_unfiltered (gdb_stdlog,
3410 "target_fileio_pwrite (%d,...,%d,%s) "
3411 "= %d (%d)\n",
3412 fd, len, pulongest (offset),
3413 ret, ret != -1 ? 0 : *target_errno);
3414 return ret;
3415 }
3416 }
3417
3418 *target_errno = FILEIO_ENOSYS;
3419 return -1;
3420}
3421
3422/* Read up to LEN bytes from FD on the target into READ_BUF.
3423 Return the number of bytes read, or -1 if an error occurs
3424 (and set *TARGET_ERRNO). */
3425int
3426target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3427 ULONGEST offset, int *target_errno)
3428{
3429 struct target_ops *t;
3430
3431 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3432 {
3433 if (t->to_fileio_pread != NULL)
3434 {
3435 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3436 target_errno);
3437
3438 if (targetdebug)
3439 fprintf_unfiltered (gdb_stdlog,
3440 "target_fileio_pread (%d,...,%d,%s) "
3441 "= %d (%d)\n",
3442 fd, len, pulongest (offset),
3443 ret, ret != -1 ? 0 : *target_errno);
3444 return ret;
3445 }
3446 }
3447
3448 *target_errno = FILEIO_ENOSYS;
3449 return -1;
3450}
3451
3452/* Close FD on the target. Return 0, or -1 if an error occurs
3453 (and set *TARGET_ERRNO). */
3454int
3455target_fileio_close (int fd, int *target_errno)
3456{
3457 struct target_ops *t;
3458
3459 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3460 {
3461 if (t->to_fileio_close != NULL)
3462 {
3463 int ret = t->to_fileio_close (t, fd, target_errno);
3464
3465 if (targetdebug)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "target_fileio_close (%d) = %d (%d)\n",
3468 fd, ret, ret != -1 ? 0 : *target_errno);
3469 return ret;
3470 }
3471 }
3472
3473 *target_errno = FILEIO_ENOSYS;
3474 return -1;
3475}
3476
3477/* Unlink FILENAME on the target. Return 0, or -1 if an error
3478 occurs (and set *TARGET_ERRNO). */
3479int
3480target_fileio_unlink (const char *filename, int *target_errno)
3481{
3482 struct target_ops *t;
3483
3484 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3485 {
3486 if (t->to_fileio_unlink != NULL)
3487 {
3488 int ret = t->to_fileio_unlink (t, filename, target_errno);
3489
3490 if (targetdebug)
3491 fprintf_unfiltered (gdb_stdlog,
3492 "target_fileio_unlink (%s) = %d (%d)\n",
3493 filename, ret, ret != -1 ? 0 : *target_errno);
3494 return ret;
3495 }
3496 }
3497
3498 *target_errno = FILEIO_ENOSYS;
3499 return -1;
3500}
3501
3502/* Read value of symbolic link FILENAME on the target. Return a
3503 null-terminated string allocated via xmalloc, or NULL if an error
3504 occurs (and set *TARGET_ERRNO). */
3505char *
3506target_fileio_readlink (const char *filename, int *target_errno)
3507{
3508 struct target_ops *t;
3509
3510 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3511 {
3512 if (t->to_fileio_readlink != NULL)
3513 {
3514 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3515
3516 if (targetdebug)
3517 fprintf_unfiltered (gdb_stdlog,
3518 "target_fileio_readlink (%s) = %s (%d)\n",
3519 filename, ret? ret : "(nil)",
3520 ret? 0 : *target_errno);
3521 return ret;
3522 }
3523 }
3524
3525 *target_errno = FILEIO_ENOSYS;
3526 return NULL;
3527}
3528
3529static void
3530target_fileio_close_cleanup (void *opaque)
3531{
3532 int fd = *(int *) opaque;
3533 int target_errno;
3534
3535 target_fileio_close (fd, &target_errno);
3536}
3537
3538/* Read target file FILENAME. Store the result in *BUF_P and
3539 return the size of the transferred data. PADDING additional bytes are
3540 available in *BUF_P. This is a helper function for
3541 target_fileio_read_alloc; see the declaration of that function for more
3542 information. */
3543
3544static LONGEST
3545target_fileio_read_alloc_1 (const char *filename,
3546 gdb_byte **buf_p, int padding)
3547{
3548 struct cleanup *close_cleanup;
3549 size_t buf_alloc, buf_pos;
3550 gdb_byte *buf;
3551 LONGEST n;
3552 int fd;
3553 int target_errno;
3554
3555 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3556 if (fd == -1)
3557 return -1;
3558
3559 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3560
3561 /* Start by reading up to 4K at a time. The target will throttle
3562 this number down if necessary. */
3563 buf_alloc = 4096;
3564 buf = xmalloc (buf_alloc);
3565 buf_pos = 0;
3566 while (1)
3567 {
3568 n = target_fileio_pread (fd, &buf[buf_pos],
3569 buf_alloc - buf_pos - padding, buf_pos,
3570 &target_errno);
3571 if (n < 0)
3572 {
3573 /* An error occurred. */
3574 do_cleanups (close_cleanup);
3575 xfree (buf);
3576 return -1;
3577 }
3578 else if (n == 0)
3579 {
3580 /* Read all there was. */
3581 do_cleanups (close_cleanup);
3582 if (buf_pos == 0)
3583 xfree (buf);
3584 else
3585 *buf_p = buf;
3586 return buf_pos;
3587 }
3588
3589 buf_pos += n;
3590
3591 /* If the buffer is filling up, expand it. */
3592 if (buf_alloc < buf_pos * 2)
3593 {
3594 buf_alloc *= 2;
3595 buf = xrealloc (buf, buf_alloc);
3596 }
3597
3598 QUIT;
3599 }
3600}
3601
3602/* Read target file FILENAME. Store the result in *BUF_P and return
3603   the size of the transferred data.  See the declaration in "target.h"
3604   for more information about the return value.  */
3605
3606LONGEST
3607target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3608{
3609 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3610}
3611
3612/* Read target file FILENAME. The result is NUL-terminated and
3613 returned as a string, allocated using xmalloc. If an error occurs
3614 or the transfer is unsupported, NULL is returned. Empty objects
3615 are returned as allocated but empty strings. A warning is issued
3616 if the result contains any embedded NUL bytes. */
3617
3618char *
3619target_fileio_read_stralloc (const char *filename)
3620{
3621 gdb_byte *buffer;
3622 char *bufstr;
3623 LONGEST i, transferred;
3624
3625 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3626 bufstr = (char *) buffer;
3627
3628 if (transferred < 0)
3629 return NULL;
3630
3631 if (transferred == 0)
3632 return xstrdup ("");
3633
3634 bufstr[transferred] = 0;
3635
3636 /* Check for embedded NUL bytes; but allow trailing NULs. */
3637 for (i = strlen (bufstr); i < transferred; i++)
3638 if (bufstr[i] != 0)
3639 {
3640 warning (_("target file %s "
3641 "contained unexpected null characters"),
3642 filename);
3643 break;
3644 }
3645
3646 return bufstr;
3647}
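
/* Usage sketch (illustrative; the file name is only an example of a
   path that may exist on the target):

       char *contents = target_fileio_read_stralloc ("/proc/version");

       if (contents != NULL)
	 {
	   printf_unfiltered ("%s\n", contents);
	   xfree (contents);
	 }

   A NULL result means the open or a read failed, or that no pushed
   target supports file I/O at all.  */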
3648
3649
3650static int
3651default_region_ok_for_hw_watchpoint (struct target_ops *self,
3652 CORE_ADDR addr, int len)
3653{
3654 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3655}
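
/* For example (illustration only): on a target whose pointers are 64
   bits wide, gdbarch_ptr_bit () / TARGET_CHAR_BIT is 64 / 8 = 8, so
   the default above accepts hardware watchpoints covering at most 8
   bytes.  */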
3656
3657static int
3658default_watchpoint_addr_within_range (struct target_ops *target,
3659 CORE_ADDR addr,
3660 CORE_ADDR start, int length)
3661{
3662 return addr >= start && addr < start + length;
3663}
3664
3665static struct gdbarch *
3666default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3667{
3668 return target_gdbarch ();
3669}
3670
3671static int
3672return_zero (void)
3673{
3674 return 0;
3675}
3676
3677static int
3678return_one (void)
3679{
3680 return 1;
3681}
3682
3683static int
3684return_minus_one (void)
3685{
3686 return -1;
3687}
3688
3689static void *
3690return_null (void)
3691{
3692 return 0;
3693}
3694
3695/*
3696 * Find the next target down the stack from the specified target.
3697 */
3698
3699struct target_ops *
3700find_target_beneath (struct target_ops *t)
3701{
3702 return t->beneath;
3703}
3704
3705/* See target.h. */
3706
3707struct target_ops *
3708find_target_at (enum strata stratum)
3709{
3710 struct target_ops *t;
3711
3712 for (t = current_target.beneath; t != NULL; t = t->beneath)
3713 if (t->to_stratum == stratum)
3714 return t;
3715
3716 return NULL;
3717}
3718
3719\f
3720/* The inferior process has died. Long live the inferior! */
3721
3722void
3723generic_mourn_inferior (void)
3724{
3725 ptid_t ptid;
3726
3727 ptid = inferior_ptid;
3728 inferior_ptid = null_ptid;
3729
3730 /* Mark breakpoints uninserted in case something tries to delete a
3731 breakpoint while we delete the inferior's threads (which would
3732 fail, since the inferior is long gone). */
3733 mark_breakpoints_out ();
3734
3735 if (!ptid_equal (ptid, null_ptid))
3736 {
3737 int pid = ptid_get_pid (ptid);
3738 exit_inferior (pid);
3739 }
3740
3741 /* Note this wipes step-resume breakpoints, so needs to be done
3742 after exit_inferior, which ends up referencing the step-resume
3743 breakpoints through clear_thread_inferior_resources. */
3744 breakpoint_init_inferior (inf_exited);
3745
3746 registers_changed ();
3747
3748 reopen_exec_file ();
3749 reinit_frame_cache ();
3750
3751 if (deprecated_detach_hook)
3752 deprecated_detach_hook ();
3753}
3754\f
3755/* Convert a normal process ID to a string. Returns the string in a
3756 static buffer. */
3757
3758char *
3759normal_pid_to_str (ptid_t ptid)
3760{
3761 static char buf[32];
3762
3763 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3764 return buf;
3765}
3766
3767static char *
3768dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3769{
3770 return normal_pid_to_str (ptid);
3771}
3772
3773/* Error-catcher for target_find_memory_regions. */
3774static int
3775dummy_find_memory_regions (struct target_ops *self,
3776 find_memory_region_ftype ignore1, void *ignore2)
3777{
3778 error (_("Command not implemented for this target."));
3779 return 0;
3780}
3781
3782/* Error-catcher for target_make_corefile_notes. */
3783static char *
3784dummy_make_corefile_notes (struct target_ops *self,
3785 bfd *ignore1, int *ignore2)
3786{
3787 error (_("Command not implemented for this target."));
3788 return NULL;
3789}
3790
3791/* Error-catcher for target_get_bookmark. */
3792static gdb_byte *
3793dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
3794{
3795 tcomplain ();
3796 return NULL;
3797}
3798
3799/* Error-catcher for target_goto_bookmark. */
3800static void
3801dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
3802{
3803 tcomplain ();
3804}
3805
3806/* Set up the handful of non-empty slots needed by the dummy target
3807 vector. */
3808
3809static void
3810init_dummy_target (void)
3811{
3812 dummy_target.to_shortname = "None";
3813 dummy_target.to_longname = "None";
3814 dummy_target.to_doc = "";
3815 dummy_target.to_attach = find_default_attach;
3816 dummy_target.to_detach =
3817 (void (*)(struct target_ops *, const char *, int))target_ignore;
3818 dummy_target.to_create_inferior = find_default_create_inferior;
3819 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3820 dummy_target.to_supports_disable_randomization
3821 = find_default_supports_disable_randomization;
3822 dummy_target.to_pid_to_str = dummy_pid_to_str;
3823 dummy_target.to_stratum = dummy_stratum;
3824 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3825 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3826 dummy_target.to_get_bookmark = dummy_get_bookmark;
3827 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3828 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3829 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3830 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3831 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3832 dummy_target.to_has_execution
3833 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3834 dummy_target.to_magic = OPS_MAGIC;
3835
3836 install_dummy_methods (&dummy_target);
3837}
3838\f
3839static void
3840debug_to_open (char *args, int from_tty)
3841{
3842 debug_target.to_open (args, from_tty);
3843
3844 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3845}
3846
3847void
3848target_close (struct target_ops *targ)
3849{
3850 gdb_assert (!target_is_pushed (targ));
3851
3852 if (targ->to_xclose != NULL)
3853 targ->to_xclose (targ);
3854 else if (targ->to_close != NULL)
3855 targ->to_close (targ);
3856
3857 if (targetdebug)
3858 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3859}
3860
3861void
3862target_attach (char *args, int from_tty)
3863{
3864 struct target_ops *t;
3865
3866 for (t = current_target.beneath; t != NULL; t = t->beneath)
3867 {
3868 if (t->to_attach != NULL)
3869 {
3870 t->to_attach (t, args, from_tty);
3871 if (targetdebug)
3872 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3873 args, from_tty);
3874 return;
3875 }
3876 }
3877
3878 internal_error (__FILE__, __LINE__,
3879 _("could not find a target to attach"));
3880}
3881
3882int
3883target_thread_alive (ptid_t ptid)
3884{
3885 struct target_ops *t;
3886
3887 for (t = current_target.beneath; t != NULL; t = t->beneath)
3888 {
3889 if (t->to_thread_alive != NULL)
3890 {
3891 int retval;
3892
3893 retval = t->to_thread_alive (t, ptid);
3894 if (targetdebug)
3895 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3896 ptid_get_pid (ptid), retval);
3897
3898 return retval;
3899 }
3900 }
3901
3902 return 0;
3903}
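
/* Illustrative sketch (editorial addition): a caller can combine
   target_thread_alive with target_pid_to_str to log whether a given
   thread still exists on the target.  The function name is
   hypothetical.  */

static void ATTRIBUTE_UNUSED
example_log_thread_liveness (ptid_t ptid)
{
  if (target_thread_alive (ptid))
    fprintf_unfiltered (gdb_stdlog, "%s is still alive\n",
                        target_pid_to_str (ptid));
  else
    fprintf_unfiltered (gdb_stdlog, "%s appears to have exited\n",
                        target_pid_to_str (ptid));
}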
3904
3905void
3906target_find_new_threads (void)
3907{
3908 struct target_ops *t;
3909
3910 for (t = current_target.beneath; t != NULL; t = t->beneath)
3911 {
3912 if (t->to_find_new_threads != NULL)
3913 {
3914 t->to_find_new_threads (t);
3915 if (targetdebug)
3916 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3917
3918 return;
3919 }
3920 }
3921}
3922
3923void
3924target_stop (ptid_t ptid)
3925{
3926 if (!may_stop)
3927 {
3928 warning (_("May not interrupt or stop the target, ignoring attempt"));
3929 return;
3930 }
3931
3932 (*current_target.to_stop) (&current_target, ptid);
3933}
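
/* Illustrative sketch (editorial addition): target_stop honors the
   "may-interrupt" permission checked above, so a caller simply names
   the thread it wants stopped.  Stopping the currently selected
   thread might look like this; the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
example_stop_current_thread (void)
{
  /* inferior_ptid identifies the thread GDB currently has selected.  */
  target_stop (inferior_ptid);
}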
3934
3935static void
3936debug_to_post_attach (struct target_ops *self, int pid)
3937{
3938 debug_target.to_post_attach (&debug_target, pid);
3939
3940 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3941}
3942
3943/* Concatenate ELEM to LIST, a comma-separated list, and return the
3944 result. The incoming LIST argument is released. */
3945
3946static char *
3947str_comma_list_concat_elem (char *list, const char *elem)
3948{
3949 if (list == NULL)
3950 return xstrdup (elem);
3951 else
3952 return reconcat (list, list, ", ", elem, (char *) NULL);
3953}
3954
3955/* Helper for target_options_to_string. If OPT is present in
3956 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3957 Returns the new resulting string. OPT is removed from
3958 TARGET_OPTIONS. */
3959
3960static char *
3961do_option (int *target_options, char *ret,
3962 int opt, char *opt_str)
3963{
3964 if ((*target_options & opt) != 0)
3965 {
3966 ret = str_comma_list_concat_elem (ret, opt_str);
3967 *target_options &= ~opt;
3968 }
3969
3970 return ret;
3971}
3972
3973char *
3974target_options_to_string (int target_options)
3975{
3976 char *ret = NULL;
3977
3978#define DO_TARG_OPTION(OPT) \
3979 ret = do_option (&target_options, ret, OPT, #OPT)
3980
3981 DO_TARG_OPTION (TARGET_WNOHANG);
3982
3983 if (target_options != 0)
3984 ret = str_comma_list_concat_elem (ret, "unknown???");
3985
3986 if (ret == NULL)
3987 ret = xstrdup ("");
3988 return ret;
3989}
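
/* Illustrative sketch (editorial addition): target_options_to_string
   returns a freshly allocated string (via xstrdup/reconcat), so the
   caller is expected to free it.  The function name is
   hypothetical.  */

static void ATTRIBUTE_UNUSED
example_log_wait_options (int options)
{
  char *str = target_options_to_string (options);

  /* E.g. TARGET_WNOHANG alone prints as "TARGET_WNOHANG"; any
     unrecognized bit is reported as "unknown???".  */
  fprintf_unfiltered (gdb_stdlog, "wait options: [%s]\n", str);
  xfree (str);
}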
3990
3991static void
3992debug_print_register (const char * func,
3993 struct regcache *regcache, int regno)
3994{
3995 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3996
3997 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3998 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3999 && gdbarch_register_name (gdbarch, regno) != NULL
4000 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
4001 fprintf_unfiltered (gdb_stdlog, "(%s)",
4002 gdbarch_register_name (gdbarch, regno));
4003 else
4004 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
4005 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
4006 {
4007 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4008 int i, size = register_size (gdbarch, regno);
4009 gdb_byte buf[MAX_REGISTER_SIZE];
4010
4011 regcache_raw_collect (regcache, regno, buf);
4012 fprintf_unfiltered (gdb_stdlog, " = ");
4013 for (i = 0; i < size; i++)
4014 {
4015 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4016 }
4017 if (size <= sizeof (LONGEST))
4018 {
4019 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
4020
4021 fprintf_unfiltered (gdb_stdlog, " %s %s",
4022 core_addr_to_string_nz (val), plongest (val));
4023 }
4024 }
4025 fprintf_unfiltered (gdb_stdlog, "\n");
4026}
4027
4028void
4029target_fetch_registers (struct regcache *regcache, int regno)
4030{
4031 struct target_ops *t;
4032
4033 for (t = current_target.beneath; t != NULL; t = t->beneath)
4034 {
4035 if (t->to_fetch_registers != NULL)
4036 {
4037 t->to_fetch_registers (t, regcache, regno);
4038 if (targetdebug)
4039 debug_print_register ("target_fetch_registers", regcache, regno);
4040 return;
4041 }
4042 }
4043}
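
/* Illustrative sketch (editorial addition): a typical caller fetches a
   single register into a regcache, here the architecture's PC
   register.  The function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
example_fetch_pc_register (void)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  target_fetch_registers (regcache, gdbarch_pc_regnum (gdbarch));
}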
4044
4045void
4046target_store_registers (struct regcache *regcache, int regno)
4047{
4048 struct target_ops *t;
4049
4050 if (!may_write_registers)
4051 error (_("Writing to registers is not allowed (regno %d)"), regno);
4052
4053 current_target.to_store_registers (&current_target, regcache, regno);
4054 if (targetdebug)
4055 {
4056 debug_print_register ("target_store_registers", regcache, regno);
4057 }
4058}
4059
4060int
4061target_core_of_thread (ptid_t ptid)
4062{
4063 struct target_ops *t;
4064
4065 for (t = current_target.beneath; t != NULL; t = t->beneath)
4066 {
4067 if (t->to_core_of_thread != NULL)
4068 {
4069 int retval = t->to_core_of_thread (t, ptid);
4070
4071 if (targetdebug)
4072 fprintf_unfiltered (gdb_stdlog,
4073 "target_core_of_thread (%d) = %d\n",
4074 ptid_get_pid (ptid), retval);
4075 return retval;
4076 }
4077 }
4078
4079 return -1;
4080}
4081
4082int
4083target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4084{
4085 struct target_ops *t;
4086
4087 for (t = current_target.beneath; t != NULL; t = t->beneath)
4088 {
4089 if (t->to_verify_memory != NULL)
4090 {
4091 int retval = t->to_verify_memory (t, data, memaddr, size);
4092
4093 if (targetdebug)
4094 fprintf_unfiltered (gdb_stdlog,
4095 "target_verify_memory (%s, %s) = %d\n",
4096 paddress (target_gdbarch (), memaddr),
4097 pulongest (size),
4098 retval);
4099 return retval;
4100 }
4101 }
4102
4103 tcomplain ();
4104}
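
/* Illustrative sketch (editorial addition): target_verify_memory
   compares SIZE bytes of DATA against target memory, which lets a
   caller check, for example, whether a breakpoint shadow still
   matches what is in the inferior.  Names here are hypothetical.  */

static int ATTRIBUTE_UNUSED
example_memory_matches (CORE_ADDR memaddr, const gdb_byte *expected,
                        ULONGEST size)
{
  /* Returns 1 on a match, 0 on a mismatch; errors out if no target
     implements the method.  */
  return target_verify_memory (expected, memaddr, size);
}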
4105
4106/* The documentation for this function is in its prototype declaration in
4107 target.h. */
4108
4109int
4110target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4111{
4112 struct target_ops *t;
4113
4114 for (t = current_target.beneath; t != NULL; t = t->beneath)
4115 if (t->to_insert_mask_watchpoint != NULL)
4116 {
4117 int ret;
4118
4119 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4120
4121 if (targetdebug)
4122 fprintf_unfiltered (gdb_stdlog, "\
4123target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4124 core_addr_to_string (addr),
4125 core_addr_to_string (mask), rw, ret);
4126
4127 return ret;
4128 }
4129
4130 return 1;
4131}
4132
4133/* The documentation for this function is in its prototype declaration in
4134 target.h. */
4135
4136int
4137target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4138{
4139 struct target_ops *t;
4140
4141 for (t = current_target.beneath; t != NULL; t = t->beneath)
4142 if (t->to_remove_mask_watchpoint != NULL)
4143 {
4144 int ret;
4145
4146 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4147
4148 if (targetdebug)
4149 fprintf_unfiltered (gdb_stdlog, "\
4150target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4151 core_addr_to_string (addr),
4152 core_addr_to_string (mask), rw, ret);
4153
4154 return ret;
4155 }
4156
4157 return 1;
4158}
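
/* Illustrative sketch (editorial addition): masked watchpoints watch
   every address whose bits selected by MASK match ADDR.  Watching a
   naturally aligned 64-byte block for writes might look like this;
   the function name is hypothetical, and hw_write is assumed to come
   from the target_hw_bp_type enumeration.  */

static int ATTRIBUTE_UNUSED
example_watch_64_byte_block (CORE_ADDR addr)
{
  CORE_ADDR mask = ~(CORE_ADDR) 0x3f;

  return target_insert_mask_watchpoint (addr & mask, mask, hw_write);
}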
4159
4160/* The documentation for this function is in its prototype declaration
4161 in target.h. */
4162
4163int
4164target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4165{
4166 struct target_ops *t;
4167
4168 for (t = current_target.beneath; t != NULL; t = t->beneath)
4169 if (t->to_masked_watch_num_registers != NULL)
4170 return t->to_masked_watch_num_registers (t, addr, mask);
4171
4172 return -1;
4173}
4174
4175/* The documentation for this function is in its prototype declaration
4176 in target.h. */
4177
4178int
4179target_ranged_break_num_registers (void)
4180{
4181 struct target_ops *t;
4182
4183 for (t = current_target.beneath; t != NULL; t = t->beneath)
4184 if (t->to_ranged_break_num_registers != NULL)
4185 return t->to_ranged_break_num_registers (t);
4186
4187 return -1;
4188}
4189
4190/* See target.h. */
4191
4192struct btrace_target_info *
4193target_enable_btrace (ptid_t ptid)
4194{
4195 struct target_ops *t;
4196
4197 for (t = current_target.beneath; t != NULL; t = t->beneath)
4198 if (t->to_enable_btrace != NULL)
4199 return t->to_enable_btrace (ptid);
4200
4201 tcomplain ();
4202 return NULL;
4203}
4204
4205/* See target.h. */
4206
4207void
4208target_disable_btrace (struct btrace_target_info *btinfo)
4209{
4210 struct target_ops *t;
4211
4212 for (t = current_target.beneath; t != NULL; t = t->beneath)
4213 if (t->to_disable_btrace != NULL)
4214 {
4215 t->to_disable_btrace (btinfo);
4216 return;
4217 }
4218
4219 tcomplain ();
4220}
4221
4222/* See target.h. */
4223
4224void
4225target_teardown_btrace (struct btrace_target_info *btinfo)
4226{
4227 struct target_ops *t;
4228
4229 for (t = current_target.beneath; t != NULL; t = t->beneath)
4230 if (t->to_teardown_btrace != NULL)
4231 {
4232 t->to_teardown_btrace (btinfo);
4233 return;
4234 }
4235
4236 tcomplain ();
4237}
4238
4239/* See target.h. */
4240
4241enum btrace_error
4242target_read_btrace (VEC (btrace_block_s) **btrace,
4243 struct btrace_target_info *btinfo,
4244 enum btrace_read_type type)
4245{
4246 struct target_ops *t;
4247
4248 for (t = current_target.beneath; t != NULL; t = t->beneath)
4249 if (t->to_read_btrace != NULL)
4250 return t->to_read_btrace (btrace, btinfo, type);
4251
4252 tcomplain ();
4253 return BTRACE_ERR_NOT_SUPPORTED;
4254}
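
/* Illustrative sketch (editorial addition): a consumer of
   target_read_btrace walks the returned vector of branch trace
   blocks.  This assumes the btrace_block begin/end fields and the
   BTRACE_READ_NEW/BTRACE_ERR_NONE constants from btrace-common.h;
   the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
example_dump_btrace (struct btrace_target_info *btinfo)
{
  VEC (btrace_block_s) *blocks = NULL;

  if (target_read_btrace (&blocks, btinfo, BTRACE_READ_NEW)
      == BTRACE_ERR_NONE)
    {
      int i;
      btrace_block_s *block;

      for (i = 0; VEC_iterate (btrace_block_s, blocks, i, block); i++)
        fprintf_unfiltered (gdb_stdlog, "block %d: %s-%s\n", i,
                            core_addr_to_string (block->begin),
                            core_addr_to_string (block->end));

      VEC_free (btrace_block_s, blocks);
    }
}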
4255
4256/* See target.h. */
4257
4258void
4259target_stop_recording (void)
4260{
4261 struct target_ops *t;
4262
4263 for (t = current_target.beneath; t != NULL; t = t->beneath)
4264 if (t->to_stop_recording != NULL)
4265 {
4266 t->to_stop_recording ();
4267 return;
4268 }
4269
4270 /* This is optional. */
4271}
4272
4273/* See target.h. */
4274
4275void
4276target_info_record (void)
4277{
4278 struct target_ops *t;
4279
4280 for (t = current_target.beneath; t != NULL; t = t->beneath)
4281 if (t->to_info_record != NULL)
4282 {
4283 t->to_info_record ();
4284 return;
4285 }
4286
4287 tcomplain ();
4288}
4289
4290/* See target.h. */
4291
4292void
4293target_save_record (const char *filename)
4294{
4295 struct target_ops *t;
4296
4297 for (t = current_target.beneath; t != NULL; t = t->beneath)
4298 if (t->to_save_record != NULL)
4299 {
4300 t->to_save_record (filename);
4301 return;
4302 }
4303
4304 tcomplain ();
4305}
4306
4307/* See target.h. */
4308
4309int
4310target_supports_delete_record (void)
4311{
4312 struct target_ops *t;
4313
4314 for (t = current_target.beneath; t != NULL; t = t->beneath)
4315 if (t->to_delete_record != NULL)
4316 return 1;
4317
4318 return 0;
4319}
4320
4321/* See target.h. */
4322
4323void
4324target_delete_record (void)
4325{
4326 struct target_ops *t;
4327
4328 for (t = current_target.beneath; t != NULL; t = t->beneath)
4329 if (t->to_delete_record != NULL)
4330 {
4331 t->to_delete_record ();
4332 return;
4333 }
4334
4335 tcomplain ();
4336}
4337
4338/* See target.h. */
4339
4340int
4341target_record_is_replaying (void)
4342{
4343 struct target_ops *t;
4344
4345 for (t = current_target.beneath; t != NULL; t = t->beneath)
4346 if (t->to_record_is_replaying != NULL)
4347 return t->to_record_is_replaying ();
4348
4349 return 0;
4350}
4351
4352/* See target.h. */
4353
4354void
4355target_goto_record_begin (void)
4356{
4357 struct target_ops *t;
4358
4359 for (t = current_target.beneath; t != NULL; t = t->beneath)
4360 if (t->to_goto_record_begin != NULL)
4361 {
4362 t->to_goto_record_begin ();
4363 return;
4364 }
4365
4366 tcomplain ();
4367}
4368
4369/* See target.h. */
4370
4371void
4372target_goto_record_end (void)
4373{
4374 struct target_ops *t;
4375
4376 for (t = current_target.beneath; t != NULL; t = t->beneath)
4377 if (t->to_goto_record_end != NULL)
4378 {
4379 t->to_goto_record_end ();
4380 return;
4381 }
4382
4383 tcomplain ();
4384}
4385
4386/* See target.h. */
4387
4388void
4389target_goto_record (ULONGEST insn)
4390{
4391 struct target_ops *t;
4392
4393 for (t = current_target.beneath; t != NULL; t = t->beneath)
4394 if (t->to_goto_record != NULL)
4395 {
4396 t->to_goto_record (insn);
4397 return;
4398 }
4399
4400 tcomplain ();
4401}
4402
4403/* See target.h. */
4404
4405void
4406target_insn_history (int size, int flags)
4407{
4408 struct target_ops *t;
4409
4410 for (t = current_target.beneath; t != NULL; t = t->beneath)
4411 if (t->to_insn_history != NULL)
4412 {
4413 t->to_insn_history (size, flags);
4414 return;
4415 }
4416
4417 tcomplain ();
4418}
4419
4420/* See target.h. */
4421
4422void
4423target_insn_history_from (ULONGEST from, int size, int flags)
4424{
4425 struct target_ops *t;
4426
4427 for (t = current_target.beneath; t != NULL; t = t->beneath)
4428 if (t->to_insn_history_from != NULL)
4429 {
4430 t->to_insn_history_from (from, size, flags);
4431 return;
4432 }
4433
4434 tcomplain ();
4435}
4436
4437/* See target.h. */
4438
4439void
4440target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4441{
4442 struct target_ops *t;
4443
4444 for (t = current_target.beneath; t != NULL; t = t->beneath)
4445 if (t->to_insn_history_range != NULL)
4446 {
4447 t->to_insn_history_range (begin, end, flags);
4448 return;
4449 }
4450
4451 tcomplain ();
4452}
4453
4454/* See target.h. */
4455
4456void
4457target_call_history (int size, int flags)
4458{
4459 struct target_ops *t;
4460
4461 for (t = current_target.beneath; t != NULL; t = t->beneath)
4462 if (t->to_call_history != NULL)
4463 {
4464 t->to_call_history (size, flags);
4465 return;
4466 }
4467
4468 tcomplain ();
4469}
4470
4471/* See target.h. */
4472
4473void
4474target_call_history_from (ULONGEST begin, int size, int flags)
4475{
4476 struct target_ops *t;
4477
4478 for (t = current_target.beneath; t != NULL; t = t->beneath)
4479 if (t->to_call_history_from != NULL)
4480 {
4481 t->to_call_history_from (begin, size, flags);
4482 return;
4483 }
4484
4485 tcomplain ();
4486}
4487
4488/* See target.h. */
4489
4490void
4491target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4492{
4493 struct target_ops *t;
4494
4495 for (t = current_target.beneath; t != NULL; t = t->beneath)
4496 if (t->to_call_history_range != NULL)
4497 {
4498 t->to_call_history_range (begin, end, flags);
4499 return;
4500 }
4501
4502 tcomplain ();
4503}
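
/* Illustrative sketch (editorial addition): the insn/call history
   methods above are what the "record instruction-history" and
   "record function-call-history" commands end up calling.  Asking the
   record target to print the ten most recent instructions with
   default (zero) flags might look like this; the function name is
   hypothetical.  */

static void ATTRIBUTE_UNUSED
example_show_recent_instructions (void)
{
  target_insn_history (10, 0);
}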
4504
4505static void
4506debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4507{
4508 debug_target.to_prepare_to_store (&debug_target, regcache);
4509
4510 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4511}
4512
4513/* See target.h. */
4514
4515const struct frame_unwind *
4516target_get_unwinder (void)
4517{
4518 struct target_ops *t;
4519
4520 for (t = current_target.beneath; t != NULL; t = t->beneath)
4521 if (t->to_get_unwinder != NULL)
4522 return t->to_get_unwinder;
4523
4524 return NULL;
4525}
4526
4527/* See target.h. */
4528
4529const struct frame_unwind *
4530target_get_tailcall_unwinder (void)
4531{
4532 struct target_ops *t;
4533
4534 for (t = current_target.beneath; t != NULL; t = t->beneath)
4535 if (t->to_get_tailcall_unwinder != NULL)
4536 return t->to_get_tailcall_unwinder;
4537
4538 return NULL;
4539}
4540
4541/* See target.h. */
4542
4543CORE_ADDR
4544forward_target_decr_pc_after_break (struct target_ops *ops,
4545 struct gdbarch *gdbarch)
4546{
4547 for (; ops != NULL; ops = ops->beneath)
4548 if (ops->to_decr_pc_after_break != NULL)
4549 return ops->to_decr_pc_after_break (ops, gdbarch);
4550
4551 return gdbarch_decr_pc_after_break (gdbarch);
4552}
4553
4554/* See target.h. */
4555
4556CORE_ADDR
4557target_decr_pc_after_break (struct gdbarch *gdbarch)
4558{
4559 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4560}
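
/* Illustrative sketch (editorial addition): after a software
   breakpoint trap, the reported PC may point past the breakpoint
   instruction; callers use the decrement reported above to recover
   the breakpoint address.  The function name is hypothetical.  */

static CORE_ADDR ATTRIBUTE_UNUSED
example_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR stop_pc)
{
  return stop_pc - target_decr_pc_after_break (gdbarch);
}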
4561
4562static int
4563deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4564 int write, struct mem_attrib *attrib,
4565 struct target_ops *target)
4566{
4567 int retval;
4568
4569 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4570 attrib, target);
4571
4572 fprintf_unfiltered (gdb_stdlog,
4573 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4574 paddress (target_gdbarch (), memaddr), len,
4575 write ? "write" : "read", retval);
4576
4577 if (retval > 0)
4578 {
4579 int i;
4580
4581 fputs_unfiltered (", bytes =", gdb_stdlog);
4582 for (i = 0; i < retval; i++)
4583 {
4584 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4585 {
4586 if (targetdebug < 2 && i > 0)
4587 {
4588 fprintf_unfiltered (gdb_stdlog, " ...");
4589 break;
4590 }
4591 fprintf_unfiltered (gdb_stdlog, "\n");
4592 }
4593
4594 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4595 }
4596 }
4597
4598 fputc_unfiltered ('\n', gdb_stdlog);
4599
4600 return retval;
4601}
4602
4603static void
4604debug_to_files_info (struct target_ops *target)
4605{
4606 debug_target.to_files_info (target);
4607
4608 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4609}
4610
4611static int
4612debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4613 struct bp_target_info *bp_tgt)
4614{
4615 int retval;
4616
4617 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4618
4619 fprintf_unfiltered (gdb_stdlog,
4620 "target_insert_breakpoint (%s, xxx) = %ld\n",
4621 core_addr_to_string (bp_tgt->placed_address),
4622 (unsigned long) retval);
4623 return retval;
4624}
4625
4626static int
4627debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4628 struct bp_target_info *bp_tgt)
4629{
4630 int retval;
4631
4632 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4633
4634 fprintf_unfiltered (gdb_stdlog,
4635 "target_remove_breakpoint (%s, xxx) = %ld\n",
4636 core_addr_to_string (bp_tgt->placed_address),
4637 (unsigned long) retval);
4638 return retval;
4639}
4640
4641static int
4642debug_to_can_use_hw_breakpoint (struct target_ops *self,
4643 int type, int cnt, int from_tty)
4644{
4645 int retval;
4646
4647 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4648 type, cnt, from_tty);
4649
4650 fprintf_unfiltered (gdb_stdlog,
4651 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4652 (unsigned long) type,
4653 (unsigned long) cnt,
4654 (unsigned long) from_tty,
4655 (unsigned long) retval);
4656 return retval;
4657}
4658
4659static int
4660debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4661 CORE_ADDR addr, int len)
4662{
4663 CORE_ADDR retval;
4664
4665 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4666 addr, len);
4667
4668 fprintf_unfiltered (gdb_stdlog,
4669 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4670 core_addr_to_string (addr), (unsigned long) len,
4671 core_addr_to_string (retval));
4672 return retval;
4673}
4674
4675static int
4676debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4677 CORE_ADDR addr, int len, int rw,
4678 struct expression *cond)
4679{
4680 int retval;
4681
4682 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4683 addr, len,
4684 rw, cond);
4685
4686 fprintf_unfiltered (gdb_stdlog,
4687 "target_can_accel_watchpoint_condition "
4688 "(%s, %d, %d, %s) = %ld\n",
4689 core_addr_to_string (addr), len, rw,
4690 host_address_to_string (cond), (unsigned long) retval);
4691 return retval;
4692}
4693
4694static int
4695debug_to_stopped_by_watchpoint (struct target_ops *ops)
4696{
4697 int retval;
4698
4699 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4700
4701 fprintf_unfiltered (gdb_stdlog,
4702 "target_stopped_by_watchpoint () = %ld\n",
4703 (unsigned long) retval);
4704 return retval;
4705}
4706
4707static int
4708debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4709{
4710 int retval;
4711
4712 retval = debug_target.to_stopped_data_address (target, addr);
4713
4714 fprintf_unfiltered (gdb_stdlog,
4715 "target_stopped_data_address ([%s]) = %ld\n",
4716 core_addr_to_string (*addr),
4717 (unsigned long)retval);
4718 return retval;
4719}
4720
4721static int
4722debug_to_watchpoint_addr_within_range (struct target_ops *target,
4723 CORE_ADDR addr,
4724 CORE_ADDR start, int length)
4725{
4726 int retval;
4727
4728 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4729 start, length);
4730
4731 fprintf_filtered (gdb_stdlog,
4732 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4733 core_addr_to_string (addr), core_addr_to_string (start),
4734 length, retval);
4735 return retval;
4736}
4737
4738static int
4739debug_to_insert_hw_breakpoint (struct target_ops *self,
4740 struct gdbarch *gdbarch,
4741 struct bp_target_info *bp_tgt)
4742{
4743 int retval;
4744
4745 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4746 gdbarch, bp_tgt);
4747
4748 fprintf_unfiltered (gdb_stdlog,
4749 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4750 core_addr_to_string (bp_tgt->placed_address),
4751 (unsigned long) retval);
4752 return retval;
4753}
4754
4755static int
4756debug_to_remove_hw_breakpoint (struct target_ops *self,
4757 struct gdbarch *gdbarch,
4758 struct bp_target_info *bp_tgt)
4759{
4760 int retval;
4761
4762 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4763 gdbarch, bp_tgt);
4764
4765 fprintf_unfiltered (gdb_stdlog,
4766 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4767 core_addr_to_string (bp_tgt->placed_address),
4768 (unsigned long) retval);
4769 return retval;
4770}
4771
4772static int
4773debug_to_insert_watchpoint (struct target_ops *self,
4774 CORE_ADDR addr, int len, int type,
4775 struct expression *cond)
4776{
4777 int retval;
4778
4779 retval = debug_target.to_insert_watchpoint (&debug_target,
4780 addr, len, type, cond);
4781
4782 fprintf_unfiltered (gdb_stdlog,
4783 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4784 core_addr_to_string (addr), len, type,
4785 host_address_to_string (cond), (unsigned long) retval);
4786 return retval;
4787}
4788
4789static int
4790debug_to_remove_watchpoint (struct target_ops *self,
4791 CORE_ADDR addr, int len, int type,
4792 struct expression *cond)
4793{
4794 int retval;
4795
4796 retval = debug_target.to_remove_watchpoint (&debug_target,
4797 addr, len, type, cond);
4798
4799 fprintf_unfiltered (gdb_stdlog,
4800 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4801 core_addr_to_string (addr), len, type,
4802 host_address_to_string (cond), (unsigned long) retval);
4803 return retval;
4804}
4805
4806static void
4807debug_to_terminal_init (struct target_ops *self)
4808{
4809 debug_target.to_terminal_init (&debug_target);
4810
4811 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4812}
4813
4814static void
4815debug_to_terminal_inferior (struct target_ops *self)
4816{
4817 debug_target.to_terminal_inferior (&debug_target);
4818
4819 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4820}
4821
4822static void
4823debug_to_terminal_ours_for_output (struct target_ops *self)
4824{
4825 debug_target.to_terminal_ours_for_output (&debug_target);
4826
4827 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4828}
4829
4830static void
4831debug_to_terminal_ours (struct target_ops *self)
4832{
4833 debug_target.to_terminal_ours (&debug_target);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4836}
4837
4838static void
4839debug_to_terminal_save_ours (struct target_ops *self)
4840{
4841 debug_target.to_terminal_save_ours (&debug_target);
4842
4843 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4844}
4845
4846static void
4847debug_to_terminal_info (struct target_ops *self,
4848 const char *arg, int from_tty)
4849{
4850 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4851
4852 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4853 from_tty);
4854}
4855
4856static void
4857debug_to_load (struct target_ops *self, char *args, int from_tty)
4858{
4859 debug_target.to_load (&debug_target, args, from_tty);
4860
4861 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4862}
4863
4864static void
4865debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4866{
4867 debug_target.to_post_startup_inferior (&debug_target, ptid);
4868
4869 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4870 ptid_get_pid (ptid));
4871}
4872
4873static int
4874debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4875{
4876 int retval;
4877
4878 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4879
4880 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4881 pid, retval);
4882
4883 return retval;
4884}
4885
4886static int
4887debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4888{
4889 int retval;
4890
4891 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4892
4893 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4894 pid, retval);
4895
4896 return retval;
4897}
4898
4899static int
4900debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4901{
4902 int retval;
4903
4904 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4905
4906 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4907 pid, retval);
4908
4909 return retval;
4910}
4911
4912static int
4913debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4914{
4915 int retval;
4916
4917 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4918
4919 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4920 pid, retval);
4921
4922 return retval;
4923}
4924
4925static int
4926debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4927{
4928 int retval;
4929
4930 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4931
4932 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4933 pid, retval);
4934
4935 return retval;
4936}
4937
4938static int
4939debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4940{
4941 int retval;
4942
4943 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4944
4945 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4946 pid, retval);
4947
4948 return retval;
4949}
4950
4951static int
4952debug_to_has_exited (struct target_ops *self,
4953 int pid, int wait_status, int *exit_status)
4954{
4955 int has_exited;
4956
4957 has_exited = debug_target.to_has_exited (&debug_target,
4958 pid, wait_status, exit_status);
4959
4960 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4961 pid, wait_status, *exit_status, has_exited);
4962
4963 return has_exited;
4964}
4965
4966static int
4967debug_to_can_run (struct target_ops *self)
4968{
4969 int retval;
4970
4971 retval = debug_target.to_can_run (&debug_target);
4972
4973 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4974
4975 return retval;
4976}
4977
4978static struct gdbarch *
4979debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4980{
4981 struct gdbarch *retval;
4982
4983 retval = debug_target.to_thread_architecture (ops, ptid);
4984
4985 fprintf_unfiltered (gdb_stdlog,
4986 "target_thread_architecture (%s) = %s [%s]\n",
4987 target_pid_to_str (ptid),
4988 host_address_to_string (retval),
4989 gdbarch_bfd_arch_info (retval)->printable_name);
4990 return retval;
4991}
4992
4993static void
4994debug_to_stop (struct target_ops *self, ptid_t ptid)
4995{
4996 debug_target.to_stop (&debug_target, ptid);
4997
4998 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4999 target_pid_to_str (ptid));
5000}
5001
5002static void
5003debug_to_rcmd (struct target_ops *self, char *command,
5004 struct ui_file *outbuf)
5005{
5006 debug_target.to_rcmd (&debug_target, command, outbuf);
5007 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
5008}
5009
5010static char *
5011debug_to_pid_to_exec_file (struct target_ops *self, int pid)
5012{
5013 char *exec_file;
5014
5015 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
5016
5017 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
5018 pid, exec_file);
5019
5020 return exec_file;
5021}
5022
5023static void
5024setup_target_debug (void)
5025{
5026 memcpy (&debug_target, &current_target, sizeof debug_target);
5027
5028 current_target.to_open = debug_to_open;
5029 current_target.to_post_attach = debug_to_post_attach;
5030 current_target.to_prepare_to_store = debug_to_prepare_to_store;
5031 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
5032 current_target.to_files_info = debug_to_files_info;
5033 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
5034 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
5035 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
5036 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
5037 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
5038 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
5039 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
5040 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
5041 current_target.to_stopped_data_address = debug_to_stopped_data_address;
5042 current_target.to_watchpoint_addr_within_range
5043 = debug_to_watchpoint_addr_within_range;
5044 current_target.to_region_ok_for_hw_watchpoint
5045 = debug_to_region_ok_for_hw_watchpoint;
5046 current_target.to_can_accel_watchpoint_condition
5047 = debug_to_can_accel_watchpoint_condition;
5048 current_target.to_terminal_init = debug_to_terminal_init;
5049 current_target.to_terminal_inferior = debug_to_terminal_inferior;
5050 current_target.to_terminal_ours_for_output
5051 = debug_to_terminal_ours_for_output;
5052 current_target.to_terminal_ours = debug_to_terminal_ours;
5053 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
5054 current_target.to_terminal_info = debug_to_terminal_info;
5055 current_target.to_load = debug_to_load;
5056 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
5057 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
5058 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
5059 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
5060 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
5061 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5062 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5063 current_target.to_has_exited = debug_to_has_exited;
5064 current_target.to_can_run = debug_to_can_run;
5065 current_target.to_stop = debug_to_stop;
5066 current_target.to_rcmd = debug_to_rcmd;
5067 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5068 current_target.to_thread_architecture = debug_to_thread_architecture;
5069}
5070\f
5071
5072static char targ_desc[] =
5073"Names of targets and files being debugged.\nShows the entire \
5074stack of targets currently in use (including the exec-file,\n\
5075core-file, and process, if any), as well as the symbol file name.";
5076
5077static void
5078do_monitor_command (char *cmd,
5079 int from_tty)
5080{
5081 if ((current_target.to_rcmd
5082 == (void (*) (struct target_ops *, char *, struct ui_file *)) tcomplain)
5083 || (current_target.to_rcmd == debug_to_rcmd
5084 && (debug_target.to_rcmd
5085 == (void (*) (struct target_ops *,
5086 char *, struct ui_file *)) tcomplain)))
5087 error (_("\"monitor\" command not supported by this target."));
5088 target_rcmd (cmd, gdb_stdtarg);
5089}
5090
5091/* Print the name of each layer of our target stack. */
5092
5093static void
5094maintenance_print_target_stack (char *cmd, int from_tty)
5095{
5096 struct target_ops *t;
5097
5098 printf_filtered (_("The current target stack is:\n"));
5099
5100 for (t = target_stack; t != NULL; t = t->beneath)
5101 {
5102 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5103 }
5104}
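
/* Illustrative sketch (editorial addition): the same walk over
   target_stack can answer whether a particular target is currently
   pushed, e.g. before issuing a target-specific command.  The
   function name is hypothetical.  */

static int ATTRIBUTE_UNUSED
example_shortname_is_pushed (const char *shortname)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    if (strcmp (t->to_shortname, shortname) == 0)
      return 1;

  return 0;
}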
5105
5106/* Controls whether async mode is permitted. */
5107int target_async_permitted = 0;
5108
5109/* The set command writes to this variable. If the inferior is
5110 executing, target_async_permitted is *not* updated. */
5111static int target_async_permitted_1 = 0;
5112
5113static void
5114set_target_async_command (char *args, int from_tty,
5115 struct cmd_list_element *c)
5116{
5117 if (have_live_inferiors ())
5118 {
5119 target_async_permitted_1 = target_async_permitted;
5120 error (_("Cannot change this setting while the inferior is running."));
5121 }
5122
5123 target_async_permitted = target_async_permitted_1;
5124}
5125
5126static void
5127show_target_async_command (struct ui_file *file, int from_tty,
5128 struct cmd_list_element *c,
5129 const char *value)
5130{
5131 fprintf_filtered (file,
5132 _("Controlling the inferior in "
5133 "asynchronous mode is %s.\n"), value);
5134}
5135
5136/* Temporary copies of permission settings. */
5137
5138static int may_write_registers_1 = 1;
5139static int may_write_memory_1 = 1;
5140static int may_insert_breakpoints_1 = 1;
5141static int may_insert_tracepoints_1 = 1;
5142static int may_insert_fast_tracepoints_1 = 1;
5143static int may_stop_1 = 1;
5144
5145/* Make the user-set values match the real values again. */
5146
5147void
5148update_target_permissions (void)
5149{
5150 may_write_registers_1 = may_write_registers;
5151 may_write_memory_1 = may_write_memory;
5152 may_insert_breakpoints_1 = may_insert_breakpoints;
5153 may_insert_tracepoints_1 = may_insert_tracepoints;
5154 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5155 may_stop_1 = may_stop;
5156}
5157
5158/* This one function handles most of the permission flags in the same
5159 way; "may-write-memory" is handled separately below. */
5160
5161static void
5162set_target_permissions (char *args, int from_tty,
5163 struct cmd_list_element *c)
5164{
5165 if (target_has_execution)
5166 {
5167 update_target_permissions ();
5168 error (_("Cannot change this setting while the inferior is running."));
5169 }
5170
5171 /* Make the real values match the user-changed values. */
5172 may_write_registers = may_write_registers_1;
5173 may_insert_breakpoints = may_insert_breakpoints_1;
5174 may_insert_tracepoints = may_insert_tracepoints_1;
5175 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5176 may_stop = may_stop_1;
5177 update_observer_mode ();
5178}
5179
5180/* Set memory write permission independently of observer mode. */
5181
5182static void
5183set_write_memory_permission (char *args, int from_tty,
5184 struct cmd_list_element *c)
5185{
5186 /* Make the real values match the user-changed values. */
5187 may_write_memory = may_write_memory_1;
5188 update_observer_mode ();
5189}
5190
5191
5192void
5193initialize_targets (void)
5194{
5195 init_dummy_target ();
5196 push_target (&dummy_target);
5197
5198 add_info ("target", target_info, targ_desc);
5199 add_info ("files", target_info, targ_desc);
5200
5201 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5202Set target debugging."), _("\
5203Show target debugging."), _("\
5204When non-zero, target debugging is enabled. Higher numbers are more\n\
5205verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5206command."),
5207 NULL,
5208 show_targetdebug,
5209 &setdebuglist, &showdebuglist);
5210
5211 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5212 &trust_readonly, _("\
5213Set mode for reading from readonly sections."), _("\
5214Show mode for reading from readonly sections."), _("\
5215When this mode is on, memory reads from readonly sections (such as .text)\n\
5216will be read from the object file instead of from the target. This will\n\
5217result in significant performance improvement for remote targets."),
5218 NULL,
5219 show_trust_readonly,
5220 &setlist, &showlist);
5221
5222 add_com ("monitor", class_obscure, do_monitor_command,
5223 _("Send a command to the remote monitor (remote targets only)."));
5224
5225 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5226 _("Print the name of each layer of the internal target stack."),
5227 &maintenanceprintlist);
5228
5229 add_setshow_boolean_cmd ("target-async", no_class,
5230 &target_async_permitted_1, _("\
5231Set whether gdb controls the inferior in asynchronous mode."), _("\
5232Show whether gdb controls the inferior in asynchronous mode."), _("\
5233Tells gdb whether to control the inferior in asynchronous mode."),
5234 set_target_async_command,
5235 show_target_async_command,
5236 &setlist,
5237 &showlist);
5238
5239 add_setshow_boolean_cmd ("may-write-registers", class_support,
5240 &may_write_registers_1, _("\
5241Set permission to write into registers."), _("\
5242Show permission to write into registers."), _("\
5243When this permission is on, GDB may write into the target's registers.\n\
5244Otherwise, any sort of write attempt will result in an error."),
5245 set_target_permissions, NULL,
5246 &setlist, &showlist);
5247
5248 add_setshow_boolean_cmd ("may-write-memory", class_support,
5249 &may_write_memory_1, _("\
5250Set permission to write into target memory."), _("\
5251Show permission to write into target memory."), _("\
5252When this permission is on, GDB may write into the target's memory.\n\
5253Otherwise, any sort of write attempt will result in an error."),
5254 set_write_memory_permission, NULL,
5255 &setlist, &showlist);
5256
5257 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5258 &may_insert_breakpoints_1, _("\
5259Set permission to insert breakpoints in the target."), _("\
5260Show permission to insert breakpoints in the target."), _("\
5261When this permission is on, GDB may insert breakpoints in the program.\n\
5262Otherwise, any sort of insertion attempt will result in an error."),
5263 set_target_permissions, NULL,
5264 &setlist, &showlist);
5265
5266 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5267 &may_insert_tracepoints_1, _("\
5268Set permission to insert tracepoints in the target."), _("\
5269Show permission to insert tracepoints in the target."), _("\
5270When this permission is on, GDB may insert tracepoints in the program.\n\
5271Otherwise, any sort of insertion attempt will result in an error."),
5272 set_target_permissions, NULL,
5273 &setlist, &showlist);
5274
5275 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5276 &may_insert_fast_tracepoints_1, _("\
5277Set permission to insert fast tracepoints in the target."), _("\
5278Show permission to insert fast tracepoints in the target."), _("\
5279When this permission is on, GDB may insert fast tracepoints.\n\
5280Otherwise, any sort of insertion attempt will result in an error."),
5281 set_target_permissions, NULL,
5282 &setlist, &showlist);
5283
5284 add_setshow_boolean_cmd ("may-interrupt", class_support,
5285 &may_stop_1, _("\
5286Set permission to interrupt or signal the target."), _("\
5287Show permission to interrupt or signal the target."), _("\
5288When this permission is on, GDB may interrupt/stop the target's execution.\n\
5289Otherwise, any attempt to interrupt or stop will be ignored."),
5290 set_target_permissions, NULL,
5291 &setlist, &showlist);
5292}