Add target_ops argument to to_extra_thread_info
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
/* Forward declarations of functions defined later in this file.  */

static void target_info (char *, int);

static void default_terminal_info (struct target_ops *, const char *, int);

static int default_watchpoint_addr_within_range (struct target_ops *,
						 CORE_ADDR, CORE_ADDR, int);

static int default_region_ok_for_hw_watchpoint (struct target_ops *,
						CORE_ADDR, int);

static void tcomplain (void) ATTRIBUTE_NORETURN;

static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);

/* Trivial stub return helpers, cast to the needed function-pointer
   types when installed as target-method defaults.  */

static int return_zero (void);

static int return_one (void);

static int return_minus_one (void);

static void *return_null (void);

void target_ignore (void);

static void target_command (char *, int);

static struct target_ops *find_default_run_target (char *);

static target_xfer_partial_ftype default_xfer_partial;

static struct gdbarch *default_thread_architecture (struct target_ops *ops,
						   ptid_t ptid);

static int find_default_can_async_p (struct target_ops *ignore);

static int find_default_is_async_p (struct target_ops *ignore);
85 #include "target-delegates.c"
86
static void init_dummy_target (void);

static struct target_ops debug_target;

/* Forward declarations of the debug_to_* wrappers.  These wrap the
   corresponding target methods to log calls when target debugging is
   enabled (see setup_target_debug below).  */

static void debug_to_open (char *, int);

static void debug_to_prepare_to_store (struct target_ops *self,
				       struct regcache *);

static void debug_to_files_info (struct target_ops *);

static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
				       struct bp_target_info *);

static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
					   int, int, int);

static int debug_to_insert_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_remove_hw_breakpoint (struct target_ops *self,
					  struct gdbarch *,
					  struct bp_target_info *);

static int debug_to_insert_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_remove_watchpoint (struct target_ops *self,
				       CORE_ADDR, int, int,
				       struct expression *);

static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);

static int debug_to_watchpoint_addr_within_range (struct target_ops *,
						  CORE_ADDR, CORE_ADDR, int);

static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
						 CORE_ADDR, int);

static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
						    CORE_ADDR, int, int,
						    struct expression *);

static void debug_to_terminal_init (struct target_ops *self);

static void debug_to_terminal_inferior (struct target_ops *self);

static void debug_to_terminal_ours_for_output (struct target_ops *self);

static void debug_to_terminal_save_ours (struct target_ops *self);

static void debug_to_terminal_ours (struct target_ops *self);

static void debug_to_load (struct target_ops *self, char *, int);

static int debug_to_can_run (struct target_ops *self);

static void debug_to_stop (ptid_t);
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of the target_structs array; it is doubled on
   demand in add_target_with_completer.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertant disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implementation of "show debug target": print the current value of
   the targetdebug setting.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
212
static void setup_target_debug (void);

/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  /* "target" is a prefix command; with no subcommand there is nothing
     to do but point the user at the help.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
/* Like target_has_execution_1, but for the currently selected
   inferior (inferior_ptid).  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
343
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no"; the casts adapt the
     plain return_zero stub to each method's signature.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Let any unset methods delegate to the target beneath.  */
  install_delegators (t);
}
371
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Grow the registry on first use, then double it when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
412
/* Add a possible target architecture to the list.  Convenience
   wrapper around add_target_with_completer with no completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
420
/* See target.h.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; presumably deprecate_cmd
     retains the pointer for later use -- confirm before changing.  */
  deprecate_cmd (c, alt);
}
435
/* Stub functions */

/* Do-nothing stub, installed as the default for target methods whose
   absence should be silently ignored.  */

void
target_ignore (void)
{
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
/* Load a program into the inferior via the current target's to_load
   method.  The data cache is invalidated first, since loading changes
   target memory behind the cache's back.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
467
468 void
469 target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471 {
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489 }
490
/* Hand the terminal over to the inferior, unless the resume is a
   background one that should leave GDB in control.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal. Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet. However, if sync_execution
     is not set, we know it will become async prior to resume. */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes. */
  (*current_target.to_terminal_inferior) (&current_target);
}
505
/* Default deprecated_xfer_memory method: fail every transfer with
   EIO, handling zero bytes.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
513
/* Error out because the current target cannot perform the requested
   operation.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
/* Error out because the requested operation needs a live process and
   there is none.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
526
/* Default to_terminal_info method: report that no terminal state was
   saved.  ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements. The pid used is the pid of the
   inferior_ptid. */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (void)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries. In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request. */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents. */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators. */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no
     higher-stratum target already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Walk top-of-stack downward so higher strata win.  The "Do not
     inherit" comments mark methods handled by delegation or by
     explicit stack searches instead.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open. */
      /* Do not inherit to_close. */
      /* Do not inherit to_attach. */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach. */
      /* Do not inherit to_disconnect. */
      /* Do not inherit to_resume. */
      /* Do not inherit to_wait. */
      /* Do not inherit to_fetch_registers. */
      /* Do not inherit to_store_registers. */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      /* Do not inherit to_insert_breakpoint. */
      /* Do not inherit to_remove_breakpoint. */
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers. */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint. */
      /* Do not inherit to_remove_mask_watchpoint. */
      /* Do not inherit to_stopped_data_address. */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint. */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers. */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill. */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior. */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork. */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior. */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals. */
      /* Do not inherit to_program_signals. */
      /* Do not inherit to_thread_alive. */
      /* Do not inherit to_find_new_threads. */
      /* Do not inherit to_pid_to_str. */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial. */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory. */
      /* Do not inherit to_has_memory. */
      /* Do not inherit to_has_stack. */
      /* Do not inherit to_has_registers. */
      /* Do not inherit to_has_execution. */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p. */
      /* Do not inherit to_is_async_p. */
      /* Do not inherit to_async. */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address. */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description. */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory. */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map. */
      /* Do not inherit to_flash_erase. */
      /* Do not inherit to_flash_done. */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it. Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default. */

#define de_fault(field, value) \
  if (!current_target.field)	       \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct target_ops *, struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (struct target_ops *, int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (struct target_ops *, char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is NULLed rather than defaulted; callers
     check for NULL explicitly.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (void))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (void))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target". That way code looking for a non-inherited
     target method can quickly and simply find it. */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
959
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them. */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number. If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one. */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted with higher strata nearer the top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them. */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level. */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level. Close it,
	 and un-hook it from the stack. */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one. */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the updated stack.  */
  update_current_target ();
}
1010
1011 /* Remove a target_ops vector from the stack, wherever it may be.
1012 Return how many times it was removed (0 or 1). */
1013
1014 int
1015 unpush_target (struct target_ops *t)
1016 {
1017 struct target_ops **cur;
1018 struct target_ops *tmp;
1019
1020 if (t->to_stratum == dummy_stratum)
1021 internal_error (__FILE__, __LINE__,
1022 _("Attempt to unpush the dummy target"));
1023
1024 /* Look for the specified target. Note that we assume that a target
1025 can only occur once in the target stack. */
1026
1027 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1028 {
1029 if ((*cur) == t)
1030 break;
1031 }
1032
1033 /* If we don't find target_ops, quit. Only open targets should be
1034 closed. */
1035 if ((*cur) == NULL)
1036 return 0;
1037
1038 /* Unchain the target. */
1039 tmp = (*cur);
1040 (*cur) = (*cur)->beneath;
1041 tmp->beneath = NULL;
1042
1043 update_current_target ();
1044
1045 /* Finally close the target. Note we do this after unchaining, so
1046 any target method calls from within the target_close
1047 implementation don't end up in T anymore. */
1048 target_close (t);
1049
1050 return 1;
1051 }
1052
1053 void
1054 pop_all_targets_above (enum strata above_stratum)
1055 {
1056 while ((int) (current_target.to_stratum) > (int) above_stratum)
1057 {
1058 if (!unpush_target (target_stack))
1059 {
1060 fprintf_unfiltered (gdb_stderr,
1061 "pop_all_targets couldn't find target %s\n",
1062 target_stack->to_shortname);
1063 internal_error (__FILE__, __LINE__,
1064 _("failed internal consistency check"));
1065 break;
1066 }
1067 }
1068 }
1069
/* Unpush every target on the stack, leaving only the dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1075
1076 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1077
1078 int
1079 target_is_pushed (struct target_ops *t)
1080 {
1081 struct target_ops **cur;
1082
1083 /* Check magic number. If wrong, it probably means someone changed
1084 the struct definition, but not all the places that initialize one. */
1085 if (t->to_magic != OPS_MAGIC)
1086 {
1087 fprintf_unfiltered (gdb_stderr,
1088 "Magic number of %s target struct wrong\n",
1089 t->to_shortname);
1090 internal_error (__FILE__, __LINE__,
1091 _("failed internal consistency check"));
1092 }
1093
1094 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1095 if (*cur == t)
1096 return 1;
1097
1098 return 0;
1099 }
1100
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Errors out (via error/throw_error) with a user-readable message if
   TLS cannot be resolved; on success returns the translated address.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target in the stack that knows how to resolve
     thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1197
1198 const char *
1199 target_xfer_status_to_string (enum target_xfer_status err)
1200 {
1201 #define CASE(X) case X: return #X
1202 switch (err)
1203 {
1204 CASE(TARGET_XFER_E_IO);
1205 CASE(TARGET_XFER_E_UNAVAILABLE);
1206 default:
1207 return "<unknown>";
1208 }
1209 #undef CASE
1210 };
1211
1212
1213 #undef MIN
1214 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1215
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;		/* Current capacity of BUFFER.  */
  char *bufptr;			/* Next free byte in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte word; TLEN is the number of bytes from
	 it (starting at OFFSET) that belong to the string.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  /* Grow the result buffer geometrically, preserving the bytes
	     accumulated so far.  */
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at (and counting) the
	 terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is returned to the caller even on error; *ERRNOP tells
     whether the contents are complete.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1288
1289 struct target_section_table *
1290 target_get_section_table (struct target_ops *target)
1291 {
1292 struct target_ops *t;
1293
1294 if (targetdebug)
1295 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1296
1297 for (t = target; t != NULL; t = t->beneath)
1298 if (t->to_get_section_table != NULL)
1299 return (*t->to_get_section_table) (t);
1300
1301 return NULL;
1302 }
1303
1304 /* Find a section containing ADDR. */
1305
1306 struct target_section *
1307 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1308 {
1309 struct target_section_table *table = target_get_section_table (target);
1310 struct target_section *secp;
1311
1312 if (table == NULL)
1313 return NULL;
1314
1315 for (secp = table->sections; secp < table->sections_end; secp++)
1316 {
1317 if (addr >= secp->addr && addr < secp->endaddr)
1318 return secp;
1319 }
1320 return NULL;
1321 }
1322
1323 /* Read memory from the live target, even if currently inspecting a
1324 traceframe. The return is the same as that of target_read. */
1325
1326 static enum target_xfer_status
1327 target_read_live_memory (enum target_object object,
1328 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1329 ULONGEST *xfered_len)
1330 {
1331 enum target_xfer_status ret;
1332 struct cleanup *cleanup;
1333
1334 /* Switch momentarily out of tfind mode so to access live memory.
1335 Note that this must not clear global state, such as the frame
1336 cache, which must still remain valid for the previous traceframe.
1337 We may be _building_ the frame cache at this point. */
1338 cleanup = make_cleanup_restore_traceframe_number ();
1339 set_traceframe_number (-1);
1340
1341 ret = target_xfer_partial (current_target.beneath, object, NULL,
1342 myaddr, NULL, memaddr, len, xfered_len);
1343
1344 do_cleanups (cleanup);
1345 return ret;
1346 }
1347
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lands in a read-only section; otherwise
     fall through and report EOF.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section covering the start of the transfer and clip
	 the request to it.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1403
1404 /* Read memory from more than one valid target. A core file, for
1405 instance, could have some of memory but delegate other bits to
1406 the target below it. So, we must manually try all targets. */
1407
1408 static enum target_xfer_status
1409 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1410 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1411 ULONGEST *xfered_len)
1412 {
1413 enum target_xfer_status res;
1414
1415 do
1416 {
1417 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1418 readbuf, writebuf, memaddr, len,
1419 xfered_len);
1420 if (res == TARGET_XFER_OK)
1421 break;
1422
1423 /* Stop if the target reports that the memory is not available. */
1424 if (res == TARGET_XFER_E_UNAVAILABLE)
1425 break;
1426
1427 /* We want to continue past core files to executables, but not
1428 past a running target's memory. */
1429 if (ops->to_has_all_memory (ops))
1430 break;
1431
1432 ops = ops->beneath;
1433 }
1434 while (ops != NULL);
1435
1436 return res;
1437 }
1438
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   This is the core of memory reading/writing: it tries, in order,
   unmapped overlay sections, trusted read-only executable sections,
   traceframe-available memory, the dcache, and finally the raw
   target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): REG_LEN is an int while LEN is ULONGEST, so very
     large requests truncate here -- presumably benign given partial
     transfer semantics, but confirm.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request so it ends where the available
		     range begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the memory region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1649
1650 /* Perform a partial memory transfer. For docs see target.h,
1651 to_xfer_partial. */
1652
1653 static enum target_xfer_status
1654 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1655 gdb_byte *readbuf, const gdb_byte *writebuf,
1656 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1657 {
1658 enum target_xfer_status res;
1659
1660 /* Zero length requests are ok and require no work. */
1661 if (len == 0)
1662 return TARGET_XFER_EOF;
1663
1664 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1665 breakpoint insns, thus hiding out from higher layers whether
1666 there are software breakpoints inserted in the code stream. */
1667 if (readbuf != NULL)
1668 {
1669 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1670 xfered_len);
1671
1672 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1673 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1674 }
1675 else
1676 {
1677 void *buf;
1678 struct cleanup *old_chain;
1679
1680 /* A large write request is likely to be partially satisfied
1681 by memory_xfer_partial_1. We will continually malloc
1682 and free a copy of the entire write request for breakpoint
1683 shadow handling even though we only end up writing a small
1684 subset of it. Cap writes to 4KB to mitigate this. */
1685 len = min (4096, len);
1686
1687 buf = xmalloc (len);
1688 old_chain = make_cleanup (xfree, buf);
1689 memcpy (buf, writebuf, len);
1690
1691 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1692 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1693 xfered_len);
1694
1695 do_cleanups (old_chain);
1696 }
1697
1698 return res;
1699 }
1700
/* Cleanup callback: restore show_memory_breakpoints to the previous
   value, which ARG carries smuggled through a uintptr_t cast.  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1706
1707 struct cleanup *
1708 make_show_memory_breakpoints_cleanup (int show)
1709 {
1710 int current = show_memory_breakpoints;
1711
1712 show_memory_breakpoints = show;
1713 return make_cleanup (restore_show_memory_breakpoints,
1714 (void *) (uintptr_t) current);
1715 }
1716
/* For docs see target.h, to_xfer_partial.

   Entry point for all partial transfers: validates the request,
   routes memory objects through the memory-specific code, and
   optionally dumps the transfer when "set debug target" is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, 16 per line; at debug level 1
	     only the first line is shown.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1809
1810 /* Read LEN bytes of target memory at address MEMADDR, placing the
1811 results in GDB's memory at MYADDR. Returns either 0 for success or
1812 TARGET_XFER_E_IO if any error occurs.
1813
1814 If an error occurs, no guarantee is made about the contents of the data at
1815 MYADDR. In particular, the caller should not depend upon partial reads
1816 filling the buffer with good data. There is no way for the caller to know
1817 how much good data might have been transfered anyway. Callers that can
1818 deal with partial reads should call target_read (which will retry until
1819 it makes no progress, and then return how much was transferred). */
1820
1821 int
1822 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1823 {
1824 /* Dispatch to the topmost target, not the flattened current_target.
1825 Memory accesses check target->to_has_(all_)memory, and the
1826 flattened target doesn't inherit those. */
1827 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1828 myaddr, memaddr, len) == len)
1829 return 0;
1830 else
1831 return TARGET_XFER_E_IO;
1832 }
1833
1834 /* Like target_read_memory, but specify explicitly that this is a read
1835 from the target's raw memory. That is, this read bypasses the
1836 dcache, breakpoint shadowing, etc. */
1837
1838 int
1839 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1840 {
1841 /* See comment in target_read_memory about why the request starts at
1842 current_target.beneath. */
1843 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1844 myaddr, memaddr, len) == len)
1845 return 0;
1846 else
1847 return TARGET_XFER_E_IO;
1848 }
1849
1850 /* Like target_read_memory, but specify explicitly that this is a read from
1851 the target's stack. This may trigger different cache behavior. */
1852
1853 int
1854 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1855 {
1856 /* See comment in target_read_memory about why the request starts at
1857 current_target.beneath. */
1858 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1859 myaddr, memaddr, len) == len)
1860 return 0;
1861 else
1862 return TARGET_XFER_E_IO;
1863 }
1864
1865 /* Like target_read_memory, but specify explicitly that this is a read from
1866 the target's code. This may trigger different cache behavior. */
1867
1868 int
1869 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1870 {
1871 /* See comment in target_read_memory about why the request starts at
1872 current_target.beneath. */
1873 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1874 myaddr, memaddr, len) == len)
1875 return 0;
1876 else
1877 return TARGET_XFER_E_IO;
1878 }
1879
1880 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1881 Returns either 0 for success or TARGET_XFER_E_IO if any
1882 error occurs. If an error occurs, no guarantee is made about how
1883 much data got written. Callers that can deal with partial writes
1884 should call target_write. */
1885
1886 int
1887 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1888 {
1889 /* See comment in target_read_memory about why the request starts at
1890 current_target.beneath. */
1891 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1892 myaddr, memaddr, len) == len)
1893 return 0;
1894 else
1895 return TARGET_XFER_E_IO;
1896 }
1897
1898 /* Write LEN bytes from MYADDR to target raw memory at address
1899 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1900 if any error occurs. If an error occurs, no guarantee is made
1901 about how much data got written. Callers that can deal with
1902 partial writes should call target_write. */
1903
1904 int
1905 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1906 {
1907 /* See comment in target_read_memory about why the request starts at
1908 current_target.beneath. */
1909 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1910 myaddr, memaddr, len) == len)
1911 return 0;
1912 else
1913 return TARGET_XFER_E_IO;
1914 }
1915
/* Fetch the target's memory map.

   Returns a vector of regions sorted by start address and numbered
   for the "mem" commands, or NULL if no target provides a map or the
   map contains overlapping regions.  The caller owns the vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Delegate to the first target in the stack implementing
     to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1963
1964 void
1965 target_flash_erase (ULONGEST address, LONGEST length)
1966 {
1967 struct target_ops *t;
1968
1969 for (t = current_target.beneath; t != NULL; t = t->beneath)
1970 if (t->to_flash_erase != NULL)
1971 {
1972 if (targetdebug)
1973 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1974 hex_string (address), phex (length, 0));
1975 t->to_flash_erase (t, address, length);
1976 return;
1977 }
1978
1979 tcomplain ();
1980 }
1981
1982 void
1983 target_flash_done (void)
1984 {
1985 struct target_ops *t;
1986
1987 for (t = current_target.beneath; t != NULL; t = t->beneath)
1988 if (t->to_flash_done != NULL)
1989 {
1990 if (targetdebug)
1991 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1992 t->to_flash_done (t);
1993 return;
1994 }
1995
1996 tcomplain ();
1997 }
1998
/* "show trust-readonly-sections" callback: print the current
   setting.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2007
2008 /* More generic transfers. */
2009
2010 static enum target_xfer_status
2011 default_xfer_partial (struct target_ops *ops, enum target_object object,
2012 const char *annex, gdb_byte *readbuf,
2013 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2014 ULONGEST *xfered_len)
2015 {
2016 if (object == TARGET_OBJECT_MEMORY
2017 && ops->deprecated_xfer_memory != NULL)
2018 /* If available, fall back to the target's
2019 "deprecated_xfer_memory" method. */
2020 {
2021 int xfered = -1;
2022
2023 errno = 0;
2024 if (writebuf != NULL)
2025 {
2026 void *buffer = xmalloc (len);
2027 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2028
2029 memcpy (buffer, writebuf, len);
2030 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2031 1/*write*/, NULL, ops);
2032 do_cleanups (cleanup);
2033 }
2034 if (readbuf != NULL)
2035 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2036 0/*read*/, NULL, ops);
2037 if (xfered > 0)
2038 {
2039 *xfered_len = (ULONGEST) xfered;
2040 return TARGET_XFER_E_IO;
2041 }
2042 else if (xfered == 0 && errno == 0)
2043 /* "deprecated_xfer_memory" uses 0, cross checked against
2044 ERRNO as one indication of an error. */
2045 return TARGET_XFER_EOF;
2046 else
2047 return TARGET_XFER_E_IO;
2048 }
2049 else
2050 {
2051 gdb_assert (ops->beneath != NULL);
2052 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2053 readbuf, writebuf, offset, len,
2054 xfered_len);
2055 }
2056 }
2057
/* Target vector read/write partial wrapper functions.  */

/* Read-only convenience wrapper: forwards to target_xfer_partial with
   BUF as the read buffer and no write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2070
/* Write-only convenience wrapper: forwards to target_xfer_partial with
   BUF as the write buffer and no read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
2080
2081 /* Wrappers to perform the full transfer. */
2082
2083 /* For docs on target_read see target.h. */
2084
2085 LONGEST
2086 target_read (struct target_ops *ops,
2087 	     enum target_object object,
2088 	     const char *annex, gdb_byte *buf,
2089 	     ULONGEST offset, LONGEST len)
2090 {
2091   LONGEST done = 0;
2092
2093   /* Issue partial reads until LEN bytes have arrived, the object
2094      ends early (EOF), or a transfer fails.  */
2095   while (done < len)
2096     {
2097       ULONGEST chunk;
2098       enum target_xfer_status status;
2099
2100       status = target_read_partial (ops, object, annex, buf + done,
2101 				    offset + done, len - done, &chunk);
2102
2103       /* Call an observer, notifying them of the xfer progress?  */
2104       if (status == TARGET_XFER_EOF)
2105 	return done;		/* Short read: report what we got.  */
2106       if (status != TARGET_XFER_OK)
2107 	return -1;		/* Hard error.  */
2108
2109       done += chunk;
2110       QUIT;
2111     }
2112   return len;
2113 }
2117
2118 /* Assuming that the entire [begin, end) range of memory cannot be
2119    read, try to read whatever subrange is possible to read.
2120
2121    The function returns, in RESULT, either zero or one memory block.
2122    If there's a readable subrange at the beginning, it is completely
2123    read and returned.  Any further readable subrange will not be read.
2124    Otherwise, if there's a readable subrange at the end, it will be
2125    completely read and returned.  Any readable subranges before it
2126    (obviously, not starting at the beginning), will be ignored.  In
2127    other cases -- either no readable subrange, or readable subrange(s)
2128    that is neither at the beginning nor at the end -- nothing is
2129    returned.
2130
2131    The purpose of this function is to handle a read across a boundary
2132    of accessible memory in a case when a memory map is not available.
2133    The above restrictions are fine for this case, but will give
2134    incorrect results if the memory is 'patchy'.  However, supporting
2135    'patchy' memory would require trying to read every single byte,
2136    and that seems an unacceptable solution.  An explicit memory map is
2137    recommended for this case -- and target_read_memory_robust will
2138    take care of reading multiple ranges then.  */
2138
2139 static void
2140 read_whatever_is_readable (struct target_ops *ops,
2141 			   ULONGEST begin, ULONGEST end,
2142 			   VEC(memory_read_result_s) **result)
2143 {
2144   gdb_byte *buf = xmalloc (end - begin);
2145   ULONGEST current_begin = begin;
2146   ULONGEST current_end = end;
2147   int forward;
2148   memory_read_result_s r;
2149   ULONGEST xfered_len;
2150
2151   /* If we previously failed to read 1 byte, nothing can be done here.  */
2152   if (end - begin <= 1)
2153     {
2154       xfree (buf);
2155       return;
2156     }
2157
2158   /* Check that either first or the last byte is readable, and give up
2159      if not.  This heuristic is meant to permit reading accessible memory
2160      at the boundary of accessible region.  */
2161   if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2162 			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
2163     {
2164       forward = 1;
2165       ++current_begin;
2166     }
2167   else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2168 				buf + (end-begin) - 1, end - 1, 1,
2169 				&xfered_len) == TARGET_XFER_OK)
2170     {
2171       forward = 0;
2172       --current_end;
2173     }
2174   else
2175     {
2176       /* Neither endpoint is readable; there is nothing to salvage.  */
2177       xfree (buf);
2178       return;
2179     }
2180
2181   /* Loop invariant is that the [current_begin, current_end) was previously
2182      found to be not readable as a whole.
2183
2184      Note loop condition -- if the range has 1 byte, we can't divide the range
2185      so there's no point trying further.  */
2186   while (current_end - current_begin > 1)
2187     {
2188       ULONGEST first_half_begin, first_half_end;
2189       ULONGEST second_half_begin, second_half_end;
2190       LONGEST xfer;
2191       ULONGEST middle = current_begin + (current_end - current_begin)/2;
2192
2193       /* The "first half" is the half adjacent to the endpoint already
2194 	 known readable; the "second half" is the remainder.  */
2194       if (forward)
2195 	{
2196 	  first_half_begin = current_begin;
2197 	  first_half_end = middle;
2198 	  second_half_begin = middle;
2199 	  second_half_end = current_end;
2200 	}
2201       else
2202 	{
2203 	  first_half_begin = middle;
2204 	  first_half_end = current_end;
2205 	  second_half_begin = current_begin;
2206 	  second_half_end = middle;
2207 	}
2208
2209       xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2210 			  buf + (first_half_begin - begin),
2211 			  first_half_begin,
2212 			  first_half_end - first_half_begin);
2213
2214       if (xfer == first_half_end - first_half_begin)
2215 	{
2216 	  /* This half reads up fine.  So, the error must be in the
2217 	     other half.  */
2218 	  current_begin = second_half_begin;
2219 	  current_end = second_half_end;
2220 	}
2221       else
2222 	{
2223 	  /* This half is not readable.  Because we've tried one byte, we
2224 	     know some part of this half is actually readable.  Go to the next
2225 	     iteration to divide again and try to read.
2226
2227 	     We don't handle the other half, because this function only tries
2228 	     to read a single readable subrange.  */
2229 	  current_begin = first_half_begin;
2230 	  current_end = first_half_end;
2231 	}
2232     }
2233
2234   if (forward)
2235     {
2236       /* The [begin, current_begin) range has been read.  */
2237       r.begin = begin;
2238       r.end = current_begin;
2239       r.data = buf;	/* Ownership of BUF moves into the result.  */
2240     }
2241   else
2242     {
2243       /* The [current_end, end) range has been read.  */
2244       LONGEST rlen = end - current_end;
2245
2246       /* Only the tail of BUF holds valid data; copy it into a
2247 	 right-sized buffer for the result.  */
2246       r.data = xmalloc (rlen);
2247       memcpy (r.data, buf + current_end - begin, rlen);
2248       r.begin = current_end;
2249       r.end = end;
2250       xfree (buf);
2251     }
2252   VEC_safe_push(memory_read_result_s, (*result), &r);
2253 }
2252
2253 /* Cleanup callback: release a VEC of memory_read_result_s, including
2254    each block's data buffer.  */
2255
2256 void
2257 free_memory_read_result_vector (void *x)
2258 {
2259   VEC(memory_read_result_s) *vec = x;
2260   memory_read_result_s *r;
2261   int i;
2262
2263   for (i = 0; VEC_iterate (memory_read_result_s, vec, i, r); ++i)
2264     xfree (r->data);
2265   VEC_free (memory_read_result_s, vec);
2266 }
2266
2267 VEC(memory_read_result_s) *
2268 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2269 {
2270   VEC(memory_read_result_s) *result = 0;
2271
2272   LONGEST xfered = 0;
2273   while (xfered < len)
2274     {
2275       struct mem_region *region = lookup_mem_region (offset + xfered);
2276       LONGEST rlen;
2277
2278       /* If there is no explicit region, a fake one should be created.  */
2279       gdb_assert (region);
2280
2281       /* REGION->HI == 0 is treated as "no upper bound".  */
2282       if (region->hi == 0)
2283 	rlen = len - xfered;
2284       else
2285 	rlen = region->hi - offset;	/* NOTE(review): the region was
2286 					   looked up at OFFSET + XFERED, so
2287 					   this looks like it should subtract
2288 					   (offset + xfered) -- confirm
2289 					   before changing.  */
2290
2286       if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2287 	{
2288 	  /* Cannot read this region.  Note that we can end up here only
2289 	     if the region is explicitly marked inaccessible, or
2290 	     'inaccessible-by-default' is in effect.  */
2291 	  xfered += rlen;
2292 	}
2293       else
2294 	{
2295 	  LONGEST to_read = min (len - xfered, rlen);
2296 	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2297
2298 	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2299 				      (gdb_byte *) buffer,
2300 				      offset + xfered, to_read);
2301 	  /* Call an observer, notifying them of the xfer progress?  */
2302 	  if (xfer <= 0)
2303 	    {
2304 	      /* Got an error reading full chunk.  See if maybe we can read
2305 		 some subrange.  */
2306 	      xfree (buffer);
2307 	      read_whatever_is_readable (ops, offset + xfered,
2308 					 offset + xfered + to_read, &result);
2309 	      xfered += to_read;
2310 	    }
2311 	  else
2312 	    {
2313 	      struct memory_read_result r;
2314 	      r.data = buffer;	/* Ownership of BUFFER moves to the result.  */
2315 	      r.begin = offset + xfered;
2316 	      r.end = r.begin + xfer;
2317 	      VEC_safe_push (memory_read_result_s, result, &r);
2318 	      xfered += xfer;
2319 	    }
2320 	  QUIT;
2321 	}
2322     }
2323   return result;
2324 }
2325
2326
2327 /* An alternative to target_write with progress callbacks. */
2328
2329 /* See target.h.  Like target_write, but calls PROGRESS (with BATON)
2330    after each successful partial transfer; PROGRESS is first invoked
2331    with 0 so it can set itself up.  Returns the number of bytes
2332    written, or -1 on error.  */
2333
2334 LONGEST
2335 target_write_with_progress (struct target_ops *ops,
2336 			    enum target_object object,
2337 			    const char *annex, const gdb_byte *buf,
2338 			    ULONGEST offset, LONGEST len,
2339 			    void (*progress) (ULONGEST, void *), void *baton)
2340 {
2341   LONGEST xfered = 0;
2342
2343   /* Give the progress callback a chance to set up.  */
2344   if (progress)
2345     (*progress) (0, baton);
2346
2347   while (xfered < len)
2348     {
2349       ULONGEST xfered_len;
2350       enum target_xfer_status status;
2351
2352       /* BUF is already const gdb_byte *; the old (gdb_byte *) cast
2353 	 needlessly discarded the const qualifier.  */
2354       status = target_write_partial (ops, object, annex,
2355 				     buf + xfered,
2356 				     offset + xfered, len - xfered,
2357 				     &xfered_len);
2358
2359       if (status == TARGET_XFER_EOF)
2360 	return xfered;
2361       if (TARGET_XFER_STATUS_ERROR_P (status))
2362 	return -1;
2363
2364       gdb_assert (status == TARGET_XFER_OK);
2365       if (progress)
2366 	(*progress) (xfered_len, baton);
2367
2368       xfered += xfered_len;
2369       QUIT;
2370     }
2371   return len;
2372 }
2366
2367 /* For docs on target_write see target.h. */
2368
2369 LONGEST
2370 target_write (struct target_ops *ops,
2371 	      enum target_object object,
2372 	      const char *annex, const gdb_byte *buf,
2373 	      ULONGEST offset, LONGEST len)
2374 {
2375   /* Plain write: the progress-reporting variant with no callback.  */
2376   return target_write_with_progress (ops, object, annex, buf,
2377 				     offset, len, NULL, NULL);
2378 }
2378
2379 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2380 the size of the transferred data. PADDING additional bytes are
2381 available in *BUF_P. This is a helper function for
2382 target_read_alloc; see the declaration of that function for more
2383 information. */
2384
2385 static LONGEST
2386 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2387 		     const char *annex, gdb_byte **buf_p, int padding)
2388 {
2389   size_t alloc_size, pos;
2390   gdb_byte *data;
2391
2392   /* This function does not have a length parameter; it reads the
2393      entire OBJECT).  Also, it doesn't support objects fetched partly
2394      from one target and partly from another (in a different stratum,
2395      e.g. a core file and an executable).  Both reasons make it
2396      unsuitable for reading memory.  */
2397   gdb_assert (object != TARGET_OBJECT_MEMORY);
2398
2399   /* Start with a 4K buffer; double it whenever it approaches full.
2400      The target will throttle each request down if necessary.  */
2401   alloc_size = 4096;
2402   data = xmalloc (alloc_size);
2403   pos = 0;
2404   for (;;)
2405     {
2406       ULONGEST chunk;
2407       enum target_xfer_status status;
2408
2409       status = target_read_partial (ops, object, annex, &data[pos],
2410 				    pos, alloc_size - pos - padding,
2411 				    &chunk);
2412
2413       if (status == TARGET_XFER_EOF)
2414 	{
2415 	  /* Read all there was; hand the buffer to the caller unless
2416 	     it is empty.  */
2417 	  if (pos == 0)
2418 	    xfree (data);
2419 	  else
2420 	    *buf_p = data;
2421 	  return pos;
2422 	}
2423       if (status != TARGET_XFER_OK)
2424 	{
2425 	  /* An error occurred; discard everything gathered so far.  */
2426 	  xfree (data);
2427 	  return TARGET_XFER_E_IO;
2428 	}
2429
2430       pos += chunk;
2431
2432       /* Grow the buffer before it fills up.  */
2433       if (alloc_size < pos * 2)
2434 	{
2435 	  alloc_size *= 2;
2436 	  data = xrealloc (data, alloc_size);
2437 	}
2438
2439       QUIT;
2440     }
2441 }
2441
2442 /* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
2443    the size of the transferred data.  See the declaration in "target.h"
2444    for more information about the return value.  */
2445
2446 LONGEST
2447 target_read_alloc (struct target_ops *ops, enum target_object object,
2448 		   const char *annex, gdb_byte **buf_p)
2449 {
2450   /* No padding byte is needed for a plain binary read.  */
2451   return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2452 }
2452
2453 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2454 returned as a string, allocated using xmalloc. If an error occurs
2455 or the transfer is unsupported, NULL is returned. Empty objects
2456 are returned as allocated but empty strings. A warning is issued
2457 if the result contains any embedded NUL bytes. */
2458
2459 char *
2460 target_read_stralloc (struct target_ops *ops, enum target_object object,
2461 		      const char *annex)
2462 {
2463   gdb_byte *buffer;
2464   char *str;
2465   LONGEST len, i;
2466
2467   /* Reserve one padding byte so a terminating NUL always fits.  */
2468   len = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2469   str = (char *) buffer;
2470
2471   if (len < 0)
2472     return NULL;
2473   if (len == 0)
2474     return xstrdup ("");
2475
2476   str[len] = 0;
2477
2478   /* Check for embedded NUL bytes; but allow trailing NULs.  */
2479   for (i = strlen (str); i < len; i++)
2480     if (str[i] != 0)
2481       {
2482 	warning (_("target object %d, annex %s, "
2483 		   "contained unexpected null characters"),
2484 		 (int) object, annex ? annex : "(none)");
2485 	break;
2486       }
2487
2488   return str;
2489 }
2490
2491 /* Memory transfer methods. */
2492
2493 void
2494 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2495 		   LONGEST len)
2496 {
2497   /* This method is used to read from an alternate, non-current
2498      target.  The read must bypass the overlay support (as symbols
2499      don't match this target) and GDB's internal cache (wrong cache
2500      for this target), hence TARGET_OBJECT_RAW_MEMORY.  */
2501   LONGEST got = target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL,
2502 			     buf, addr, len);
2503
2504   if (got != len)
2505     memory_error (TARGET_XFER_E_IO, addr);
2506 }
2505
2506 ULONGEST
2507 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2508 			    int len, enum bfd_endian byte_order)
2509 {
2510   gdb_byte raw[sizeof (ULONGEST)];
2511
2512   /* LEN must fit in the scratch buffer.  */
2513   gdb_assert (len <= sizeof (raw));
2514   get_target_memory (ops, addr, raw, len);
2515   return extract_unsigned_integer (raw, len, byte_order);
2516 }
2516
2517 /* See target.h. */
2518
2519 int
2520 target_insert_breakpoint (struct gdbarch *gdbarch,
2521 			  struct bp_target_info *bp_tgt)
2522 {
2523   /* Respect the "may-insert-breakpoints" setting.  */
2524   if (may_insert_breakpoints)
2525     return current_target.to_insert_breakpoint (&current_target,
2526 						gdbarch, bp_tgt);
2527
2528   warning (_("May not insert breakpoints"));
2529   return 1;
2530 }
2532
2533 /* See target.h. */
2534
2535 int
2536 target_remove_breakpoint (struct gdbarch *gdbarch,
2537 			  struct bp_target_info *bp_tgt)
2538 {
2539   /* This is kind of a weird case to handle, but the permission might
2540      have been changed after breakpoints were inserted - in which case
2541      we should just take the user literally and assume that any
2542      breakpoints should be left in place.  */
2543   if (may_insert_breakpoints)
2544     return current_target.to_remove_breakpoint (&current_target,
2545 						gdbarch, bp_tgt);
2546
2547   warning (_("May not remove breakpoints"));
2548   return 1;
2549 }
2552
2553 /* Implement the "info target" command: describe each target on the
2554    stack that provides memory.  */
2555
2556 static void
2557 target_info (char *args, int from_tty)
2558 {
2559   struct target_ops *t;
2560   int has_all_mem = 0;
2561
2562   if (symfile_objfile != NULL)
2563     printf_unfiltered (_("Symbols from \"%s\".\n"),
2564 		       objfile_name (symfile_objfile));
2565
2566   for (t = target_stack; t != NULL; t = t->beneath)
2567     {
2568       /* Skip memory-less targets and anything at or below the dummy
2569 	 stratum.  */
2570       if (!(*t->to_has_memory) (t)
2571 	  || (int) t->to_stratum <= (int) dummy_stratum)
2572 	continue;
2573
2574       if (has_all_mem)
2575 	printf_unfiltered (_("\tWhile running this, "
2576 			     "GDB does not access memory from...\n"));
2577       printf_unfiltered ("%s:\n", t->to_longname);
2578       (t->to_files_info) (t);
2579       has_all_mem = (*t->to_has_all_memory) (t);
2580     }
2581 }
2578
2579 /* This function is called before any new inferior is created, e.g.
2580 by running a program, attaching, or connecting to a target.
2581 It cleans up any state from previous invocations which might
2582 change between runs. This is a subset of what target_preopen
2583 resets (things which might change between targets). */
2584
2585 void
2586 target_pre_inferior (int from_tty)
2587 {
2588   /* Clear out solib state.  Otherwise the solib state of the previous
2589      inferior might have survived and is entirely wrong for the new
2590      target.  This has been observed on GNU/Linux using glibc 2.3.  How
2591      to reproduce:
2592
2593      bash$ ./foo&
2594      [1] 4711
2595      bash$ ./foo&
2596      [1] 4712
2597      bash$ gdb ./foo
2598      [...]
2599      (gdb) attach 4711
2600      (gdb) detach
2601      (gdb) attach 4712
2602      Cannot access memory at address 0xdeadbeef
2603   */
2604
2605   /* In some OSs, the shared library list is the same/global/shared
2606      across inferiors.  If code is shared between processes, so are
2607      memory regions and features.  */
2608   if (!gdbarch_has_global_solist (target_gdbarch ()))
2609     {
2610       no_shared_libraries (NULL, from_tty);
2611
2612       invalidate_target_mem_regions ();
2613
2614       target_clear_description ();
2615     }
2616
2617   /* Cached agent capabilities are per-inferior; drop them so they are
2618      re-queried for the new one.  */
2619   agent_capability_invalidate ();
2620 }
2619
2620 /* Callback for iterate_over_inferiors. Gets rid of the given
2621 inferior. */
2622
2623 static int
2624 dispose_inferior (struct inferior *inf, void *args)
2625 {
2626   struct thread_info *thread = any_thread_of_process (inf->pid);
2627
2628   if (thread != NULL)
2629     {
2630       switch_to_thread (thread->ptid);
2631
2632       /* Core inferiors actually should be detached, not killed.  */
2633       if (target_has_execution)
2634 	target_kill ();
2635       else
2636 	target_detach (NULL, 0);
2637     }
2638
2639   /* Zero tells iterate_over_inferiors to keep going.  */
2640   return 0;
2641 }
2642
2643 /* This is to be called by the open routine before it does
2644 anything. */
2645
2646 void
2647 target_preopen (int from_tty)
2648 {
2649   dont_repeat ();
2650
2651   if (have_inferiors ())
2652     {
2653       /* Interactively confirm before killing a live inferior;
2654 	 otherwise dispose of all inferiors unconditionally.  */
2655       if (from_tty
2656 	  && have_live_inferiors ()
2657 	  && !query (_("A program is being debugged already. Kill it? ")))
2658 	error (_("Program not killed."));
2659
2660       iterate_over_inferiors (dispose_inferior, NULL);
2661     }
2662
2663   /* Calling target_kill may remove the target from the stack.  But if
2664      it doesn't (which seems like a win for UDI), remove it now.  */
2665   /* Leave the exec target, though.  The user may be switching from a
2666      live process to a core of the same program.  */
2667   pop_all_targets_above (file_stratum);
2668
2669   target_pre_inferior (from_tty);
2670 }
2669
2670 /* Detach a target after doing deferred register stores. */
2671
2672 void
2673 target_detach (const char *args, int from_tty)
2674 {
2675   struct target_ops *t;
2676
2677   /* Global breakpoints are removed on disconnection, not here.
2678      Otherwise, if we're in breakpoints-always-inserted mode, they
2679      must come out before we detach.  */
2680   if (!gdbarch_has_global_breakpoints (target_gdbarch ()))
2681     remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2682
2683   prepare_for_detach ();
2684
2685   /* Delegate to the first target on the stack implementing detach.  */
2686   for (t = current_target.beneath; t != NULL; t = t->beneath)
2687     if (t->to_detach != NULL)
2688       {
2689 	t->to_detach (t, args, from_tty);
2690 	if (targetdebug)
2691 	  fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2692 			      args, from_tty);
2693 	return;
2694       }
2695
2696   internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2697 }
2702
2703 void
2704 target_disconnect (char *args, int from_tty)
2705 {
2706   struct target_ops *t;
2707
2708   /* If we're in breakpoints-always-inserted mode or if breakpoints
2709      are global across processes, we have to remove them before
2710      disconnecting.  */
2711   remove_breakpoints ();
2712
2713   for (t = current_target.beneath; t != NULL; t = t->beneath)
2714     {
2715       if (t->to_disconnect == NULL)
2716 	continue;
2717
2718       if (targetdebug)
2719 	fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2720 			    args, from_tty);
2721       t->to_disconnect (t, args, from_tty);
2722       return;
2723     }
2724
2725   tcomplain ();
2726 }
2725
2726 /* Wait for an event from the inferior matching PTID, delegating to
2727    the current target; fills in *STATUS and returns the ptid of the
2728    thread the event is for.  OPTIONS are TARGET_W* flags.  */
2729
2730 ptid_t
2731 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2732 {
2733   /* The local `struct target_ops *t' was declared but never used;
2734      it has been removed.  */
2735   ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2736 					    status, options);
2737
2738   if (targetdebug)
2739     {
2740       char *status_string;
2741       char *options_string;
2742
2743       status_string = target_waitstatus_to_string (status);
2744       options_string = target_options_to_string (options);
2745       fprintf_unfiltered (gdb_stdlog,
2746 			  "target_wait (%d, status, options={%s})"
2747 			  " = %d, %s\n",
2748 			  ptid_get_pid (ptid), options_string,
2749 			  ptid_get_pid (retval), status_string);
2750       xfree (status_string);
2751       xfree (options_string);
2752     }
2753
2754   return retval;
2755 }
2751
2752 char *
2753 target_pid_to_str (ptid_t ptid)
2754 {
2755   struct target_ops *t;
2756
2757   /* Ask the first target on the stack that can render PTID.  */
2758   for (t = current_target.beneath; t != NULL; t = t->beneath)
2759     if (t->to_pid_to_str != NULL)
2760       return (*t->to_pid_to_str) (t, ptid);
2761
2762   /* Fall back to the generic rendering.  */
2763   return normal_pid_to_str (ptid);
2764 }
2765
2766 char *
2767 target_thread_name (struct thread_info *info)
2768 {
2769   struct target_ops *t;
2770
2771   /* The first target on the stack implementing the hook answers.  */
2772   for (t = current_target.beneath; t != NULL; t = t->beneath)
2773     if (t->to_thread_name != NULL)
2774       return (*t->to_thread_name) (info);
2775
2776   /* No target knows this thread's name.  */
2777   return NULL;
2778 }
2779
2780 /* Resume execution of PTID, single-stepping if STEP is nonzero and
2781    delivering SIGNAL.  Invalidates memory caches, delegates to the
2782    current target, and updates GDB's notion of the threads' state.  */
2783
2784 void
2785 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2786 {
2787   /* The local `struct target_ops *t' was declared but never used;
2788      it has been removed.  */
2789
2790   /* Memory contents may change once the inferior runs.  */
2791   target_dcache_invalidate ();
2792
2793   current_target.to_resume (&current_target, ptid, step, signal);
2794   if (targetdebug)
2795     fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2796 			ptid_get_pid (ptid),
2797 			step ? "step" : "continue",
2798 			gdb_signal_to_name (signal));
2799
2800   registers_changed_ptid (ptid);
2801   set_executing (ptid, 1);
2802   set_running (ptid, 1);
2803   clear_inline_frame_state (ptid);
2804 }
2799
2800 void
2801 target_pass_signals (int numsigs, unsigned char *pass_signals)
2802 {
2803   struct target_ops *t;
2804
2805   /* Hand the pass-signals table to the first target implementing it.  */
2806   for (t = current_target.beneath; t != NULL; t = t->beneath)
2807     {
2808       if (t->to_pass_signals == NULL)
2809 	continue;
2810
2811       if (targetdebug)
2812 	{
2813 	  int i;
2814
2815 	  fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2816 			      numsigs);
2817 	  for (i = 0; i < numsigs; i++)
2818 	    if (pass_signals[i])
2819 	      fprintf_unfiltered (gdb_stdlog, " %s",
2820 				  gdb_signal_to_name (i));
2821 	  fprintf_unfiltered (gdb_stdlog, " })\n");
2822 	}
2823
2824       (*t->to_pass_signals) (t, numsigs, pass_signals);
2825       return;
2826     }
2827 }
2829
2830 void
2831 target_program_signals (int numsigs, unsigned char *program_signals)
2832 {
2833   struct target_ops *t;
2834
2835   /* Hand the program-signals table to the first target implementing
2836      it.  */
2837   for (t = current_target.beneath; t != NULL; t = t->beneath)
2838     {
2839       if (t->to_program_signals == NULL)
2840 	continue;
2841
2842       if (targetdebug)
2843 	{
2844 	  int i;
2845
2846 	  fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2847 			      numsigs);
2848 	  for (i = 0; i < numsigs; i++)
2849 	    if (program_signals[i])
2850 	      fprintf_unfiltered (gdb_stdlog, " %s",
2851 				  gdb_signal_to_name (i));
2852 	  fprintf_unfiltered (gdb_stdlog, " })\n");
2853 	}
2854
2855       (*t->to_program_signals) (t, numsigs, program_signals);
2856       return;
2857     }
2858 }
2859
2860 /* Look through the list of possible targets for a target that can
2861 follow forks. */
2862
2863 int
2864 target_follow_fork (int follow_child, int detach_fork)
2865 {
2866   struct target_ops *t;
2867
2868   /* Delegate to the first target that knows how to follow forks.  */
2869   for (t = current_target.beneath; t != NULL; t = t->beneath)
2870     if (t->to_follow_fork != NULL)
2871       {
2872 	int result = t->to_follow_fork (t, follow_child, detach_fork);
2873
2874 	if (targetdebug)
2875 	  fprintf_unfiltered (gdb_stdlog,
2876 			      "target_follow_fork (%d, %d) = %d\n",
2877 			      follow_child, detach_fork, result);
2878 	return result;
2879       }
2880
2881   /* Some target returned a fork event, but did not know how to follow it.  */
2882   internal_error (__FILE__, __LINE__,
2883 		  _("could not find a target to follow fork"));
2884 }
2886
2887 /* Notify the first capable target that the inferior is gone, then
2888    release BFD handles so object files aren't kept locked while we
2889    are not actually debugging.  */
2890
2891 void
2892 target_mourn_inferior (void)
2893 {
2894   struct target_ops *t;
2895
2896   for (t = current_target.beneath; t != NULL; t = t->beneath)
2897     {
2898       if (t->to_mourn_inferior != NULL)
2899 	{
2900 	  t->to_mourn_inferior (t);
2901 	  if (targetdebug)
2902 	    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2903
2904 	  /* We no longer need to keep handles on any of the object files.
2905 	     Make sure to release them to avoid unnecessarily locking any
2906 	     of them while we're not actually debugging.  */
2907 	  bfd_cache_close_all ();
2908
2909 	  return;
2910 	}
2911     }
2912
2913   /* The old message said "to follow mourn inferior", a copy-paste
2914      from target_follow_fork.  */
2915   internal_error (__FILE__, __LINE__,
2916 		  _("could not find a target to mourn inferior"));
2917 }
2912
2913 /* Look for a target which can describe architectural features, starting
2914 from TARGET. If we find one, return its description. */
2915
2916 const struct target_desc *
2917 target_read_description (struct target_ops *target)
2918 {
2919   struct target_ops *t;
2920
2921   /* Ask each target in turn, starting at TARGET; the first non-NULL
2922      description wins.  */
2923   for (t = target; t != NULL; t = t->beneath)
2924     if (t->to_read_description != NULL)
2925       {
2926 	const struct target_desc *tdesc = t->to_read_description (t);
2927
2928 	if (tdesc != NULL)
2929 	  return tdesc;
2930       }
2931
2932   return NULL;
2933 }
2933
2934 /* The default implementation of to_search_memory.
2935    This implements a basic search of memory, reading target memory and
2936    performing the search here (as opposed to performing the search on
2937    the target side with, for example, gdbserver).  */
2938
2939 int
2940 simple_search_memory (struct target_ops *ops,
2941 		      CORE_ADDR start_addr, ULONGEST search_space_len,
2942 		      const gdb_byte *pattern, ULONGEST pattern_len,
2943 		      CORE_ADDR *found_addrp)
2944 {
2945   /* NOTE: also defined in find.c testcase.  */
2946 #define SEARCH_CHUNK_SIZE 16000
2947   const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2948   /* Buffer to hold memory contents for searching.  */
2949   gdb_byte *search_buf;
2950   unsigned search_buf_size;
2951   struct cleanup *old_cleanups;
2952
2953   /* One chunk plus PATTERN_LEN - 1 overlap bytes, so a match that
2954      straddles two chunks is still found.  */
2955   search_buf_size = chunk_size + pattern_len - 1;
2956
2957   /* No point in trying to allocate a buffer larger than the search space.  */
2958   if (search_space_len < search_buf_size)
2959     search_buf_size = search_space_len;
2960
2961   /* Plain malloc (not xmalloc) so an oversized request is reported as
2962      an error instead of aborting GDB.  */
2963   search_buf = malloc (search_buf_size);
2964   if (search_buf == NULL)
2965     error (_("Unable to allocate memory to perform the search."));
2966   old_cleanups = make_cleanup (free_current_contents, &search_buf);
2967
2968   /* Prime the search buffer.  */
2969
2970   if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2971 		   search_buf, start_addr, search_buf_size) != search_buf_size)
2972     {
2973       warning (_("Unable to access %s bytes of target "
2974 		 "memory at %s, halting search."),
2975 	       pulongest (search_buf_size), hex_string (start_addr));
2976       do_cleanups (old_cleanups);
2977       return -1;
2978     }
2979
2980   /* Perform the search.
2981
2982      The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2983      When we've scanned N bytes we copy the trailing bytes to the start and
2984      read in another N bytes.  */
2985
2986   while (search_space_len >= pattern_len)
2987     {
2988       gdb_byte *found_ptr;
2989       unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2990
2991       found_ptr = memmem (search_buf, nr_search_bytes,
2992 			  pattern, pattern_len);
2993
2994       if (found_ptr != NULL)
2995 	{
2996 	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2997
2998 	  *found_addrp = found_addr;
2999 	  do_cleanups (old_cleanups);
3000 	  return 1;
3001 	}
3002
3003       /* Not found in this chunk, skip to next chunk.  */
3004
3005       /* Don't let search_space_len wrap here, it's unsigned.  */
3006       if (search_space_len >= chunk_size)
3007 	search_space_len -= chunk_size;
3008       else
3009 	search_space_len = 0;
3010
3011       if (search_space_len >= pattern_len)
3012 	{
3013 	  unsigned keep_len = search_buf_size - chunk_size;
3014 	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
3015 	  int nr_to_read;
3016
3017 	  /* Copy the trailing part of the previous iteration to the front
3018 	     of the buffer for the next iteration.  */
3019 	  gdb_assert (keep_len == pattern_len - 1);
3020 	  memcpy (search_buf, search_buf + chunk_size, keep_len);
3021
3022 	  nr_to_read = min (search_space_len - keep_len, chunk_size);
3023
3024 	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3025 			   search_buf + keep_len, read_addr,
3026 			   nr_to_read) != nr_to_read)
3027 	    {
3028 	      warning (_("Unable to access %s bytes of target "
3029 			 "memory at %s, halting search."),
3030 		       plongest (nr_to_read),
3031 		       hex_string (read_addr));
3032 	      do_cleanups (old_cleanups);
3033 	      return -1;
3034 	    }
3035
3036 	  start_addr += chunk_size;
3037 	}
3038     }
3039
3040   /* Not found.  */
3041
3042   do_cleanups (old_cleanups);
3043   return 0;
3044 }
3041
3042 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3043 sequence of bytes in PATTERN with length PATTERN_LEN.
3044
3045 The result is 1 if found, 0 if not found, and -1 if there was an error
3046 requiring halting of the search (e.g. memory read error).
3047 If the pattern is found the address is recorded in FOUND_ADDRP. */
3048
3049 int
3050 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3051 		      const gdb_byte *pattern, ULONGEST pattern_len,
3052 		      CORE_ADDR *found_addrp)
3053 {
3054   struct target_ops *t;
3055   int found;
3056
3057   /* We don't use INHERIT to set current_target.to_search_memory,
3058      so we have to scan the target stack and handle targetdebug
3059      ourselves.  */
3060
3061   if (targetdebug)
3062     fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3063 			hex_string (start_addr));
3064
3065   /* Look for a target with its own search implementation.  */
3066   for (t = current_target.beneath; t != NULL; t = t->beneath)
3067     if (t->to_search_memory != NULL)
3068       break;
3069
3070   if (t == NULL)
3071     /* No specialized implementation available; do the search from
3072        here with the generic routine.  */
3073     found = simple_search_memory (current_target.beneath,
3074 				  start_addr, search_space_len,
3075 				  pattern, pattern_len, found_addrp);
3076   else
3077     found = t->to_search_memory (t, start_addr, search_space_len,
3078 				 pattern, pattern_len, found_addrp);
3079
3080   if (targetdebug)
3081     fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3082
3083   return found;
3084 }
3088
3089 /* Look through the currently pushed targets. If none of them will
3090 be able to restart the currently running process, issue an error
3091 message. */
3092
3093 void
3094 target_require_runnable (void)
3095 {
3096   struct target_ops *t;
3097
3098   for (t = target_stack; t != NULL; t = t->beneath)
3099     {
3100       /* If this target knows how to create a new program, then
3101 	 assume we will still be able to after killing the current
3102 	 one.  Either killing and mourning will not pop T, or else
3103 	 find_default_run_target will find it again.  */
3104       if (t->to_create_inferior != NULL)
3105 	return;
3106
3107       /* Thread- and arch-stratum targets that cannot create inferiors
3108 	 do not count; assume they will be pushed again if necessary,
3109 	 and continue to the process_stratum.  */
3110       if (t->to_stratum == thread_stratum || t->to_stratum == arch_stratum)
3111 	continue;
3112
3113       error (_("The \"%s\" target does not support \"run\". "
3114 	       "Try \"help target\" or \"continue\"."),
3115 	     t->to_shortname);
3116     }
3117
3118   /* This function is only called if the target is running.  In that
3119      case there should have been a process_stratum target and it
3120      should either know how to create inferiors, or not...  */
3121   internal_error (__FILE__, __LINE__, _("No targets found"));
3122 }
3124
3125 /* Look through the list of possible targets for a target that can
3126 execute a run or attach command without any other data. This is
3127 used to locate the default process stratum.
3128
3129 If DO_MESG is not NULL, the result is always valid (error() is
3130 called for errors); else, return NULL on error. */
3131
3132 static struct target_ops *
3133 find_default_run_target (char *do_mesg)
3134 {
3135   struct target_ops **t;
3136   struct target_ops *candidate = NULL;
3137   int matches = 0;
3138
3139   /* Scan all registered targets for those able to run.  */
3140   for (t = target_structs; t < target_structs + target_struct_size; ++t)
3141     if ((*t)->to_can_run && target_can_run (*t))
3142       {
3143 	candidate = *t;
3144 	++matches;
3145       }
3146
3147   /* Exactly one runnable target is required for an unambiguous
3148      default.  */
3149   if (matches != 1)
3150     {
3151       if (do_mesg)
3152 	error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3153       else
3154 	return NULL;
3155     }
3156
3157   return candidate;
3158 }
3161
3162 void
3163 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3164 {
3165   /* find_default_run_target errors out when there is no default, so
3166      T is always valid here.  */
3167   struct target_ops *t = find_default_run_target ("attach");
3168
3169   (t->to_attach) (t, args, from_tty);
3170 }
3171
3172 void
3173 find_default_create_inferior (struct target_ops *ops,
3174 			      char *exec_file, char *allargs, char **env,
3175 			      int from_tty)
3176 {
3177   /* find_default_run_target errors out when there is no default, so
3178      T is always valid here.  */
3179   struct target_ops *t = find_default_run_target ("run");
3180
3181   (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3182 }
3183
3184 static int
3185 find_default_can_async_p (struct target_ops *ignore)
3186 {
3187   struct target_ops *t;
3188
3189   /* This may be called before the target is pushed on the stack;
3190      look for the default process stratum.  If there's none, gdb isn't
3191      configured with a native debugger, and target remote isn't
3192      connected yet.  */
3193   t = find_default_run_target (NULL);
3194   if (t != NULL && t->to_can_async_p != delegate_can_async_p)
3195     return (t->to_can_async_p) (t);
3196   return 0;
3197 }
3198
3199 static int
3200 find_default_is_async_p (struct target_ops *ignore)
3201 {
3202   struct target_ops *t;
3203
3204   /* This may be called before the target is pushed on the stack;
3205      look for the default process stratum.  If there's none, gdb isn't
3206      configured with a native debugger, and target remote isn't
3207      connected yet.  */
3208   t = find_default_run_target (NULL);
3209   if (t != NULL && t->to_is_async_p != delegate_is_async_p)
3210     return (t->to_is_async_p) (t);
3211   return 0;
3212 }
3213
3214 static int
3215 find_default_supports_non_stop (void)
3216 {
3217 struct target_ops *t;
3218
3219 t = find_default_run_target (NULL);
3220 if (t && t->to_supports_non_stop)
3221 return (t->to_supports_non_stop) ();
3222 return 0;
3223 }
3224
3225 int
3226 target_supports_non_stop (void)
3227 {
3228 struct target_ops *t;
3229
3230 for (t = &current_target; t != NULL; t = t->beneath)
3231 if (t->to_supports_non_stop)
3232 return t->to_supports_non_stop ();
3233
3234 return 0;
3235 }
3236
/* Implement the "info proc" command.  Returns 1 if some target
   handled the request, 0 if no target implements to_info_proc.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Delegate to the first target (from T downward) implementing the
     method.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}

/* Default to_supports_disable_randomization: ask the default run
   target, if any.  */

static int
find_default_supports_disable_randomization (void)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) ();
  return 0;
}

/* Return nonzero if some target on the stack supports disabling
   address space randomization.  */

int
target_supports_disable_randomization (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_disable_randomization)
      return t->to_supports_disable_randomization ();

  return 0;
}

/* Read the remote OS data object of type TYPE.  Returns an xmalloc'd
   string the caller must free, or NULL if no capable target.  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
3310
/* Determine the current address space of thread PTID.  Never returns
   NULL: either a target provides it, the inferior's main address
   space is used, or an internal error is raised.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3347
3348
3349 /* Target file operations. */
3350
3351 static struct target_ops *
3352 default_fileio_target (void)
3353 {
3354 /* If we're already connected to something that can perform
3355 file I/O, use it. Otherwise, try using the native target. */
3356 if (current_target.to_stratum >= process_stratum)
3357 return current_target.beneath;
3358 else
3359 return find_default_run_target ("file I/O");
3360 }
3361
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing to_fileio_open;
     FILEIO_ENOSYS if none does.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3449
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method;
     FILEIO_ENOSYS if none does.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  *target_errno = FILEIO_ENOSYS;
  return NULL;
}

/* Cleanup callback: close the target fd pointed to by OPAQUE,
   discarding any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3535
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure the fd is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes spare so the caller can e.g. append a
	 NUL terminator.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3599
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding requested; raw bytes only.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}

/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding for the NUL terminator.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3646
3647
/* Default to_region_ok_for_hw_watchpoint: accept regions no wider
   than one pointer on the current architecture.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}

/* Default to_watchpoint_addr_within_range: simple half-open interval
   check [START, START + LENGTH).  */

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}

/* Default to_thread_architecture: all threads share the global
   architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}

/* Constant-returning defaults, installed (via function-pointer casts)
   into target vector slots that need trivial implementations.  */

static int
return_zero (void)
{
  return 0;
}

static int
return_one (void)
{
  return 1;
}

static int
return_minus_one (void)
{
  return -1;
}
3686
/* Default callback returning a null pointer, installed (via a
   function-pointer cast) into target vector slots whose default
   result is "nothing".  */

static void *
return_null (void)
{
  /* NULL, not a bare 0, for a pointer result.  */
  return NULL;
}
3692
/*
 * Find the next target down the stack from the specified target.
 */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Targets form a singly-linked stack via BENEATH.  */
  return t->beneath;
}
3702
3703 /* See target.h. */
3704
3705 struct target_ops *
3706 find_target_at (enum strata stratum)
3707 {
3708 struct target_ops *t;
3709
3710 for (t = current_target.beneath; t != NULL; t = t->beneath)
3711 if (t->to_stratum == stratum)
3712 return t;
3713
3714 return NULL;
3715 }
3716
3717 \f
/* The inferior process has died.  Long live the inferior!  */

/* Generic to_mourn_inferior: clear per-inferior state (breakpoints,
   threads, register caches, frame cache) after the inferior exits.
   The order of operations below is significant; see the inline
   comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear INFERIOR_PTID first; the teardown below must not see a
     live "current" thread.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the interface (e.g. a GUI) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3752 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  /* Static buffer: the result is only valid until the next call.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* to_pid_to_str for the dummy target: fall back to the generic
   formatting above.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}

/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}

/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  /* tcomplain does not return; the return statement merely
     satisfies the compiler.  */
  tcomplain ();
  return NULL;
}

/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3801
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack
   and supplies safe defaults; most slots are filled by
   install_dummy_methods at the end.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  /* Detaching from nothing is a harmless no-op; the cast adapts
     target_ignore to the slot's signature.  */
  dummy_target.to_detach =
    (void (*)(struct target_ops *, const char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The dummy target has no memory, stack, registers or execution;
     the casts adapt return_zero to each slot's signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill all remaining slots with the generated defaults.  */
  install_dummy_methods (&dummy_target);
}
3834 \f
/* Debug wrapper for to_open: forward to the real target and log.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}

/* Close TARG.  Prefers to_xclose (which also deallocates TARG) over
   to_close.  TARG must already have been unpushed.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3856
/* Attach to the process described by ARGS: delegate to the first
   target on the stack implementing to_attach.  It is an internal
   error for no target to implement it (the dummy target always
   does).  */

void
target_attach (char *args, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_attach != NULL)
	{
	  t->to_attach (t, args, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
				args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to attach"));
}

/* Return nonzero if thread PTID is still alive; 0 if it is gone or
   no target implements the query.  */

int
target_thread_alive (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_alive != NULL)
	{
	  int retval;

	  retval = t->to_thread_alive (t, ptid);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
				ptid_get_pid (ptid), retval);

	  return retval;
	}
    }

  return 0;
}
3900
/* Ask the target to update GDB's thread list; a no-op if no target
   implements to_find_new_threads.  */

void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
	{
	  t->to_find_new_threads (t);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

	  return;
	}
    }
}

/* Stop thread PTID, honoring the "may-stop" user setting.  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (ptid);
}

/* Debug wrapper for to_post_attach: forward and log.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3938
/* Concatenate ELEM to LIST, a comma separate list, and return the
   result.  The LIST incoming argument is released.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}

/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      /* Clear the bit so the caller can detect unknown leftovers.  */
      *target_options &= ~opt;
    }

  return ret;
}

/* Render TARGET_OPTIONS (a TARGET_* flag mask) as a human-readable,
   comma-separated, xmalloc'd string; unknown bits are reported as
   "unknown???".  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Anything still set was not matched above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3986
/* Log, to gdb_stdlog, the name, raw bytes and (when it fits in a
   LONGEST) the numeric value of register REGNO of REGCACHE,
   prefixed by FUNC.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the register's name when it has one.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Dump the raw bytes in order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Also show the value numerically when it fits.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
4023
/* Fetch register REGNO (or all registers if REGNO is -1 — TODO
   confirm against target.h) into REGCACHE, delegating to the first
   target implementing to_fetch_registers.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_fetch_registers != NULL)
	{
	  t->to_fetch_registers (t, regcache, regno);
	  if (targetdebug)
	    debug_print_register ("target_fetch_registers", regcache, regno);
	  return;
	}
    }
}

/* Store register REGNO from REGCACHE to the target, honoring the
   "may-write-registers" user setting.  Uses the auto-installed
   delegate on CURRENT_TARGET rather than an explicit walk.  */

void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
4055
/* Return the core that thread PTID last ran on, or -1 if unknown or
   unsupported.  */

int
target_core_of_thread (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_core_of_thread != NULL)
	{
	  int retval = t->to_core_of_thread (t, ptid);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_core_of_thread (%d) = %d\n",
				ptid_get_pid (ptid), retval);
	  return retval;
	}
    }

  return -1;
}

/* Compare SIZE bytes of target memory at MEMADDR against DATA,
   delegating to the first target implementing to_verify_memory.
   Complains (noreturn) if no target supports the operation.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  /* tcomplain does not return.  */
  tcomplain ();
}
4101
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method; 1 (failure)
     if none does.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  /* -1 means masked watchpoints are not supported.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  /* -1 means ranged breakpoints are not supported.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
4185
/* See target.h.  */

/* Enable branch tracing for thread PTID; complains if no target
   supports branch tracing.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (ptid);

  tcomplain ();
  return NULL;
}

/* See target.h.  */

/* Disable the branch trace described by BTINFO; complains if no
   target implements the method.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Tear down branch tracing state BTINFO (e.g. when the target is
   going away); complains if unimplemented.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Read branch trace data of kind TYPE into *BTRACE for BTINFO;
   BTRACE_ERR_NOT_SUPPORTED if no target implements the method.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4251
/* See target.h.  */

/* Stop any in-progress recording; silently does nothing if no target
   implements the method.  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording ();
	return;
      }

  /* This is optional.  */
}

/* See target.h.  */

/* Print information about the recording; complains if no target
   implements the method.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record ();
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Save the recorded execution trace to FILENAME; complains if
   unimplemented.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (filename);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Return nonzero if some target on the stack can delete the recorded
   trace.  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  */

/* Delete the recorded execution trace; complains if unimplemented.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record ();
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Return nonzero if the target is currently replaying recorded
   execution; 0 if not or unsupported.  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying ();

  return 0;
}
4347
/* See target.h.  */

/* Go to the beginning of the recorded execution trace; complains if
   unimplemented.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin ();
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Go to the end of the recorded execution trace; complains if
   unimplemented.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end ();
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Go to instruction number INSN in the recorded trace; complains if
   unimplemented.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (insn);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Print SIZE instructions of recorded history using FLAGS; complains
   if unimplemented.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Print SIZE instructions of recorded history starting at FROM;
   complains if unimplemented.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (from, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Print recorded instruction history between BEGIN and END;
   complains if unimplemented.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (begin, end, flags);
	return;
      }

  tcomplain ();
}
4449
/* See target.h.  */

/* Print SIZE functions of recorded call history using FLAGS;
   complains if unimplemented.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Print SIZE functions of recorded call history starting at BEGIN;
   complains if unimplemented.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (begin, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  */

/* Print recorded call history between BEGIN and END; complains if
   unimplemented.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (begin, end, flags);
	return;
      }

  tcomplain ();
}
4500
/* Logging wrapper for to_prepare_to_store: forward to the real method
   saved in DEBUG_TARGET, then trace the call to gdb_stdlog.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4508
4509 /* See target.h. */
4510
4511 const struct frame_unwind *
4512 target_get_unwinder (void)
4513 {
4514 struct target_ops *t;
4515
4516 for (t = current_target.beneath; t != NULL; t = t->beneath)
4517 if (t->to_get_unwinder != NULL)
4518 return t->to_get_unwinder;
4519
4520 return NULL;
4521 }
4522
4523 /* See target.h. */
4524
4525 const struct frame_unwind *
4526 target_get_tailcall_unwinder (void)
4527 {
4528 struct target_ops *t;
4529
4530 for (t = current_target.beneath; t != NULL; t = t->beneath)
4531 if (t->to_get_tailcall_unwinder != NULL)
4532 return t->to_get_tailcall_unwinder;
4533
4534 return NULL;
4535 }
4536
/* See target.h.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  /* Walk the target stack starting at OPS; the first target that
     implements to_decr_pc_after_break answers for the whole stack.  */
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  /* No target overrides the adjustment; use the architecture's
     default.  */
  return gdbarch_decr_pc_after_break (gdbarch);
}
4549
/* See target.h.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  /* Start the stack walk at the first real target beneath the
     current_target aggregate.  */
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4557
/* Logging wrapper for deprecated_xfer_memory: perform the transfer via
   the real method saved in DEBUG_TARGET, then trace the request and,
   on success, a hex dump of the transferred bytes to gdb_stdlog.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output line at each 16-byte-aligned buffer
	     address.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, dump only the first line of bytes.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4598
/* Logging wrapper for to_files_info: forward to the real method saved
   in DEBUG_TARGET, then trace the call to gdb_stdlog.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4606
/* Logging wrapper for to_insert_breakpoint: forward to the real method
   saved in DEBUG_TARGET and trace the call and result to gdb_stdlog.  */

static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4621
/* Logging wrapper for to_remove_breakpoint: forward to the real method
   saved in DEBUG_TARGET and trace the call and result to gdb_stdlog.  */

static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4636
/* Logging wrapper for to_can_use_hw_breakpoint: forward to the real
   method saved in DEBUG_TARGET and trace arguments and result.  */

static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4654
4655 static int
4656 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4657 CORE_ADDR addr, int len)
4658 {
4659 CORE_ADDR retval;
4660
4661 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4662 addr, len);
4663
4664 fprintf_unfiltered (gdb_stdlog,
4665 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4666 core_addr_to_string (addr), (unsigned long) len,
4667 core_addr_to_string (retval));
4668 return retval;
4669 }
4670
/* Logging wrapper for to_can_accel_watchpoint_condition: forward to
   the real method saved in DEBUG_TARGET and trace arguments and
   result to gdb_stdlog.  */

static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4689
/* Logging wrapper for to_stopped_by_watchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the result to gdb_stdlog.  */

static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4702
/* Logging wrapper for to_stopped_data_address: forward to the saved
   real method (note: passes TARGET through, not &debug_target, unlike
   most sibling wrappers) and trace the result to gdb_stdlog.
   NOTE(review): *ADDR is printed even when RETVAL is 0, in which case
   the method need not have stored anything there -- confirm callers
   pass an initialized ADDR.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4716
4717 static int
4718 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4719 CORE_ADDR addr,
4720 CORE_ADDR start, int length)
4721 {
4722 int retval;
4723
4724 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4725 start, length);
4726
4727 fprintf_filtered (gdb_stdlog,
4728 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4729 core_addr_to_string (addr), core_addr_to_string (start),
4730 length, retval);
4731 return retval;
4732 }
4733
/* Logging wrapper for to_insert_hw_breakpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4750
/* Logging wrapper for to_remove_hw_breakpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4767
/* Logging wrapper for to_insert_watchpoint: forward to the real method
   saved in DEBUG_TARGET and trace arguments and result.  */

static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4784
/* Logging wrapper for to_remove_watchpoint: forward to the real method
   saved in DEBUG_TARGET and trace arguments and result.  */

static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4801
/* Logging wrapper for to_terminal_init: forward to the real method
   saved in DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4809
/* Logging wrapper for to_terminal_inferior: forward to the real method
   saved in DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4817
/* Logging wrapper for to_terminal_ours_for_output: forward to the real
   method saved in DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4825
/* Logging wrapper for to_terminal_ours: forward to the real method
   saved in DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4833
/* Logging wrapper for to_terminal_save_ours: forward to the real
   method saved in DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4841
/* Logging wrapper for to_terminal_info: forward to the real method
   saved in DEBUG_TARGET and trace the call to gdb_stdlog.
   NOTE(review): ARG is printed with %s; presumably callers never pass
   NULL here -- confirm.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4851
/* Logging wrapper for to_load: forward to the real method saved in
   DEBUG_TARGET and trace the call to gdb_stdlog.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4859
/* Logging wrapper for to_post_startup_inferior: forward to the real
   method saved in DEBUG_TARGET and trace the inferior's pid.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4868
/* Logging wrapper for to_insert_fork_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4881
/* Logging wrapper for to_remove_fork_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4894
/* Logging wrapper for to_insert_vfork_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4907
/* Logging wrapper for to_remove_vfork_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4920
/* Logging wrapper for to_insert_exec_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4933
/* Logging wrapper for to_remove_exec_catchpoint: forward to the real
   method saved in DEBUG_TARGET and trace the call and result.  */

static int
debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4946
/* Logging wrapper for to_has_exited: forward to the real method saved
   in DEBUG_TARGET and trace arguments and result.
   NOTE(review): *EXIT_STATUS is printed unconditionally; presumably it
   is only meaningful when HAS_EXITED is nonzero -- confirm callers
   initialize it.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4961
/* Logging wrapper for to_can_run: forward to the real method saved in
   DEBUG_TARGET and trace the result to gdb_stdlog.  */

static int
debug_to_can_run (struct target_ops *self)
{
  int retval;

  retval = debug_target.to_can_run (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
4973
4974 static struct gdbarch *
4975 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4976 {
4977 struct gdbarch *retval;
4978
4979 retval = debug_target.to_thread_architecture (ops, ptid);
4980
4981 fprintf_unfiltered (gdb_stdlog,
4982 "target_thread_architecture (%s) = %s [%s]\n",
4983 target_pid_to_str (ptid),
4984 host_address_to_string (retval),
4985 gdbarch_bfd_arch_info (retval)->printable_name);
4986 return retval;
4987 }
4988
/* Logging wrapper for to_stop (still the old, self-less signature):
   forward to the real method saved in DEBUG_TARGET and trace the
   call to gdb_stdlog.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4997
/* Logging wrapper for to_rcmd (still the old, self-less signature):
   forward COMMAND to the real method saved in DEBUG_TARGET and trace
   the call to gdb_stdlog.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
5005
/* Logging wrapper for to_pid_to_exec_file (still the old, self-less
   signature): forward to the real method saved in DEBUG_TARGET and
   trace the result to gdb_stdlog.  */

static char *
debug_to_pid_to_exec_file (int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (pid);

  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
5018
/* Install the debug_to_* logging wrappers into current_target.  A copy
   of the real vector is first saved in DEBUG_TARGET so each wrapper
   can forward to the genuine method after logging.  Only the methods
   that still have an explicit wrapper in this file are overridden.  */

static void
setup_target_debug (void)
{
  /* Snapshot the real target vector before overwriting its slots.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5066 \f
5067
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5072
/* Implement the "monitor" command: forward CMD to the remote monitor
   via target_rcmd.  Error out if to_rcmd is still the tcomplain stub,
   checking through the debug wrapper when target debugging has
   replaced the slot.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
5085
5086 /* Print the name of each layers of our target stack. */
5087
5088 static void
5089 maintenance_print_target_stack (char *cmd, int from_tty)
5090 {
5091 struct target_ops *t;
5092
5093 printf_filtered (_("The current target stack is:\n"));
5094
5095 for (t = target_stack; t != NULL; t = t->beneath)
5096 {
5097 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5098 }
5099 }
5100
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the staged
   value is copied over (or reverted) by set_target_async_command.  */
static int target_async_permitted_1 = 0;
5107
/* The "set target-async" callback: commit the staged value into
   target_async_permitted, unless an inferior is live, in which case
   the staged value is reverted and an error is raised.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the edit the set command already made to the staged
	 variable before reporting the failure.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5120
/* The "show target-async" callback: print the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5130
/* Temporary copies of permission settings.  The set commands write
   here; set_target_permissions / set_write_memory_permission copy the
   values into the real may_* flags when the change is allowed.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5139
/* Make the user-set values match the real values again.  Used to
   revert the staged *_1 copies after a rejected change.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5152
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is deliberately not handled here; it has its
   own callback, set_write_memory_permission, which works even while
   the inferior is running.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged copies, then refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5174
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this is allowed even while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5185
5186
/* Module initialization: install the dummy target as the bottom of the
   target stack and register all target-related commands and settings.
   Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of every target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags: each "may-*" setting writes into a *_1 staging
     variable; the callback copies it into the real flag.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.15349 seconds and 4 git commands to generate.