convert to_prepare_to_store
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 static void *return_null (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int find_default_can_async_p (struct target_ops *ignore);
84
85 static int find_default_is_async_p (struct target_ops *ignore);
86
87 #include "target-delegates.c"
88
89 static void init_dummy_target (void);
90
91 static struct target_ops debug_target;
92
93 static void debug_to_open (char *, int);
94
95 static void debug_to_prepare_to_store (struct target_ops *self,
96 struct regcache *);
97
98 static void debug_to_files_info (struct target_ops *);
99
100 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
107 int, int, int);
108
109 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
110 struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
114 struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (struct target_ops *self,
118 CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (struct target_ops *self,
122 CORE_ADDR, int, int,
123 struct expression *);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
131 CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
134 CORE_ADDR, int, int,
135 struct expression *);
136
137 static void debug_to_terminal_init (struct target_ops *self);
138
139 static void debug_to_terminal_inferior (struct target_ops *self);
140
141 static void debug_to_terminal_ours_for_output (struct target_ops *self);
142
143 static void debug_to_terminal_save_ours (struct target_ops *self);
144
145 static void debug_to_terminal_ours (struct target_ops *self);
146
147 static void debug_to_load (struct target_ops *self, char *, int);
148
149 static int debug_to_can_run (struct target_ops *self);
150
151 static void debug_to_stop (struct target_ops *self, ptid_t);
152
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of TARGET_STRUCTS; doubled on demand.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implement "show debug target": print VALUE, the current setting of
   the "targetdebug" variable, to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
214
215 static void setup_target_debug (void);
216
217 /* The user just typed 'target' without the name of a target. */
218
static void
target_command (char *arg, int from_tty)
{
  /* ARG and FROM_TTY are unused; a bare "target" is always an error,
     since a target name argument is required.  */
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
225
226 /* Default target_has_* methods for process_stratum targets. */
227
228 int
229 default_child_has_all_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_stack (struct target_ops *ops)
250 {
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_registers (struct target_ops *ops)
260 {
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
270 {
271 /* If there's no thread selected, then we can't make it run through
272 hoops. */
273 if (ptid_equal (the_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279
280 int
281 target_has_all_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_all_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_memory_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_memory (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_stack_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_stack (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_registers_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_registers (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_1 (ptid_t the_ptid)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_execution (t, the_ptid))
335 return 1;
336
337 return 0;
338 }
339
/* Like target_has_execution_1, but for the currently selected
   inferior (INFERIOR_PTID).  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
345
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): calling return_zero through pointers cast to
     incompatible function types is formally undefined behavior in C;
     it is a long-standing idiom in this file and appears to be relied
     on working — confirm before changing.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Install the delegator stubs for all remaining NULL methods.  */
  install_delegators (t);
}
373
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Lazily allocate the registry on first use, then grow it by
     doubling when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registered target also creates the "target" prefix
     command itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
414
/* Add a possible target architecture to the list, with no command
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
422
/* See target.h.  */

void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; deprecate_cmd appears to
     keep a reference to it for the command's lifetime — confirm
     ownership before adding an xfree.  */
  deprecate_cmd (c, alt);
}
437
438 /* Stub functions */
439
/* Do-nothing stub, installed as the default for target methods whose
   absence should be silently ignored.  */

void
target_ignore (void)
{
}
444
/* Kill the inferior: delegate to the first target on the stack that
   implements to_kill.  Errors out via noprocess if no target does.  */

void
target_kill (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_kill != NULL)
      {
	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

	t->to_kill (t);
	return;
      }

  noprocess ();
}
462
/* Load ARG into the inferior via the current target's to_load method.
   The target dcache is invalidated first so no stale memory contents
   are read back after the load.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
469
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV, delegating to the first target on the stack that
   implements to_create_inferior.  It is an internal error if no
   target does.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  /* Debug trace is emitted after the call, mirroring how the
	     other target_* wrappers in this file log.  */
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
492
/* Give the inferior ownership of the terminal, unless this is a
   background resume in which case GDB keeps it.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
507
/* Default deprecated_xfer_memory stub: refuse every transfer request
   by setting errno to EIO and reporting zero bytes handled.  All
   parameters are ignored.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
515
/* Error out (does not return) because the current target does not
   support the requested operation.  Installed as the default for
   methods that must be explicitly provided.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
522
/* Error out (does not return) because there is no process to act
   upon.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
528
/* Default to_terminal_info method: report that no terminal state has
   been saved.  ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
534
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
546
/* Default to_execution_direction method: targets that cannot execute
   in reverse, or that are not async, always run forward.  A reverse-
   capable async target must supply its own implementation.  */

static enum exec_direction_kind
default_execution_direction (struct target_ops *self)
{
  if (!target_can_execute_reverse)
    return EXEC_FORWARD;
  else if (!target_can_async_p ())
    return EXEC_FORWARD;
  else
    gdb_assert_not_reached ("\
to_execution_direction must be implemented for reverse async");
}
558
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if
     current_target does not already have a value for it (i.e. a
     higher-stratum target has priority).  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (struct target_ops *, int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct target_ops *, struct gdbarch *,
		      struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_minus_one);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (struct target_ops *, CORE_ADDR, int, int,
		      struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (struct target_ops *, char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (struct target_ops *, int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (struct target_ops *, int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (struct target_ops *, int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_thread_name,
	    (char *(*) (struct target_ops *, struct thread_info *))
	    return_null);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
957
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted with higher strata closer to the top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
         and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
1008
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
1050
1051 void
1052 pop_all_targets_above (enum strata above_stratum)
1053 {
1054 while ((int) (current_target.to_stratum) > (int) above_stratum)
1055 {
1056 if (!unpush_target (target_stack))
1057 {
1058 fprintf_unfiltered (gdb_stderr,
1059 "pop_all_targets couldn't find target %s\n",
1060 target_stack->to_shortname);
1061 internal_error (__FILE__, __LINE__,
1062 _("failed internal consistency check"));
1063 break;
1064 }
1065 }
1066 }
1067
/* Pop every target off the stack, down to (but not including) the
   dummy target that always sits at the bottom.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
1073
1074 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1075
1076 int
1077 target_is_pushed (struct target_ops *t)
1078 {
1079 struct target_ops **cur;
1080
1081 /* Check magic number. If wrong, it probably means someone changed
1082 the struct definition, but not all the places that initialize one. */
1083 if (t->to_magic != OPS_MAGIC)
1084 {
1085 fprintf_unfiltered (gdb_stderr,
1086 "Magic number of %s target struct wrong\n",
1087 t->to_shortname);
1088 internal_error (__FILE__, __LINE__,
1089 _("failed internal consistency check"));
1090 }
1091
1092 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1093 if (*cur == t)
1094 return 1;
1095
1096 return 0;
1097 }
1098
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   TLS lookup errors are translated into user-friendly error () calls
   here; unrecognized exceptions are re-thrown to a higher catcher.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH, which is implemented with
     setjmp/longjmp, so the value must survive the longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target beneath the flattened current target that
     knows how to resolve thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* Shared libraries and executables get differently worded
	     messages below.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error: let a higher catcher deal
		 with it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1195
1196 const char *
1197 target_xfer_status_to_string (enum target_xfer_status err)
1198 {
1199 #define CASE(X) case X: return #X
1200 switch (err)
1201 {
1202 CASE(TARGET_XFER_E_IO);
1203 CASE(TARGET_XFER_E_UNAVAILABLE);
1204 default:
1205 return "<unknown>";
1206 }
1207 #undef CASE
1208 };
1209
1210
/* Local minimum-of-two helper; override any definition a system
   header may have supplied.  Arguments are evaluated more than once,
   so avoid side effects.  */
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1213
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read 4-byte aligned chunks.  TLEN is the number of bytes in
	 the current chunk that belong to the string; OFFSET is the
	 string's starting position within that aligned chunk.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) when this chunk would not
	 fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy the chunk into the result, stopping at (and including)
	 a terminating NUL byte.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* The buffer is handed back even on error, so the caller always
     owns (and must free) *STRING.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1286
1287 struct target_section_table *
1288 target_get_section_table (struct target_ops *target)
1289 {
1290 struct target_ops *t;
1291
1292 if (targetdebug)
1293 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1294
1295 for (t = target; t != NULL; t = t->beneath)
1296 if (t->to_get_section_table != NULL)
1297 return (*t->to_get_section_table) (t);
1298
1299 return NULL;
1300 }
1301
1302 /* Find a section containing ADDR. */
1303
1304 struct target_section *
1305 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1306 {
1307 struct target_section_table *table = target_get_section_table (target);
1308 struct target_section *secp;
1309
1310 if (table == NULL)
1311 return NULL;
1312
1313 for (secp = table->sections; secp < table->sections_end; secp++)
1314 {
1315 if (addr >= secp->addr && addr < secp->endaddr)
1316 return secp;
1317 }
1318 return NULL;
1319 }
1320
1321 /* Read memory from the live target, even if currently inspecting a
1322 traceframe. The return is the same as that of target_read. */
1323
1324 static enum target_xfer_status
1325 target_read_live_memory (enum target_object object,
1326 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1327 ULONGEST *xfered_len)
1328 {
1329 enum target_xfer_status ret;
1330 struct cleanup *cleanup;
1331
1332 /* Switch momentarily out of tfind mode so to access live memory.
1333 Note that this must not clear global state, such as the frame
1334 cache, which must still remain valid for the previous traceframe.
1335 We may be _building_ the frame cache at this point. */
1336 cleanup = make_cleanup_restore_traceframe_number ();
1337 set_traceframe_number (-1);
1338
1339 ret = target_xfer_partial (current_target.beneath, object, NULL,
1340 myaddr, NULL, memaddr, len, xfered_len);
1341
1342 do_cleanups (cleanup);
1343 return ret;
1344 }
1345
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed when MEMADDR lies inside a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section covering the start of the transfer and clip
	 the request to it.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not inside any read-only section: nothing transferred.  */
  return TARGET_XFER_EOF;
}
1401
1402 /* Read memory from more than one valid target. A core file, for
1403 instance, could have some of memory but delegate other bits to
1404 the target below it. So, we must manually try all targets. */
1405
1406 static enum target_xfer_status
1407 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1408 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1409 ULONGEST *xfered_len)
1410 {
1411 enum target_xfer_status res;
1412
1413 do
1414 {
1415 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1416 readbuf, writebuf, memaddr, len,
1417 xfered_len);
1418 if (res == TARGET_XFER_OK)
1419 break;
1420
1421 /* Stop if the target reports that the memory is not available. */
1422 if (res == TARGET_XFER_E_UNAVAILABLE)
1423 break;
1424
1425 /* We want to continue past core files to executables, but not
1426 past a running target's memory. */
1427 if (ops->to_has_all_memory (ops))
1428 break;
1429
1430 ops = ops->beneath;
1431 }
1432 while (ops != NULL);
1433
1434 return res;
1435 }
1436
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   executable sections, traceframe-aware live-memory fallback, GDB's
   data cache, and finally the raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The request is not satisfied from the traceframe when
	     nothing is available at MEMADDR itself.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request so it stops just before the
		     first available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* Find the inferior owning the current thread, if any; the dcache
     below is only consulted with a live inferior.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1647
1648 /* Perform a partial memory transfer. For docs see target.h,
1649 to_xfer_partial. */
1650
1651 static enum target_xfer_status
1652 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1653 gdb_byte *readbuf, const gdb_byte *writebuf,
1654 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1655 {
1656 enum target_xfer_status res;
1657
1658 /* Zero length requests are ok and require no work. */
1659 if (len == 0)
1660 return TARGET_XFER_EOF;
1661
1662 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1663 breakpoint insns, thus hiding out from higher layers whether
1664 there are software breakpoints inserted in the code stream. */
1665 if (readbuf != NULL)
1666 {
1667 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1668 xfered_len);
1669
1670 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1671 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1672 }
1673 else
1674 {
1675 void *buf;
1676 struct cleanup *old_chain;
1677
1678 /* A large write request is likely to be partially satisfied
1679 by memory_xfer_partial_1. We will continually malloc
1680 and free a copy of the entire write request for breakpoint
1681 shadow handling even though we only end up writing a small
1682 subset of it. Cap writes to 4KB to mitigate this. */
1683 len = min (4096, len);
1684
1685 buf = xmalloc (len);
1686 old_chain = make_cleanup (xfree, buf);
1687 memcpy (buf, writebuf, len);
1688
1689 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1690 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1691 xfered_len);
1692
1693 do_cleanups (old_chain);
1694 }
1695
1696 return res;
1697 }
1698
/* Cleanup callback: restore show_memory_breakpoints to the value
   stashed (encoded as a uintptr_t) in ARG.  */
static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1704
1705 struct cleanup *
1706 make_show_memory_breakpoints_cleanup (int show)
1707 {
1708 int current = show_memory_breakpoints;
1709
1710 show_memory_breakpoints = show;
1711 return make_cleanup (restore_show_memory_breakpoints,
1712 (void *) (uintptr_t) current);
1713 }
1714
/* For docs see target.h, to_xfer_partial.

   Top-level dispatcher: routes memory objects to the memory-specific
   code, raw memory to the raw walker, and everything else straight
   to OPS's to_xfer_partial.  Also handles "set debug target"
   tracing and validates the *XFERED_LEN contract.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes, starting a new line at
	     each 16-byte host-address boundary; with a debug level
	     below 2 only the first line is printed.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1807
1808 /* Read LEN bytes of target memory at address MEMADDR, placing the
1809 results in GDB's memory at MYADDR. Returns either 0 for success or
1810 TARGET_XFER_E_IO if any error occurs.
1811
1812 If an error occurs, no guarantee is made about the contents of the data at
1813 MYADDR. In particular, the caller should not depend upon partial reads
1814 filling the buffer with good data. There is no way for the caller to know
1815 how much good data might have been transfered anyway. Callers that can
1816 deal with partial reads should call target_read (which will retry until
1817 it makes no progress, and then return how much was transferred). */
1818
1819 int
1820 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1821 {
1822 /* Dispatch to the topmost target, not the flattened current_target.
1823 Memory accesses check target->to_has_(all_)memory, and the
1824 flattened target doesn't inherit those. */
1825 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1826 myaddr, memaddr, len) == len)
1827 return 0;
1828 else
1829 return TARGET_XFER_E_IO;
1830 }
1831
1832 /* Like target_read_memory, but specify explicitly that this is a read
1833 from the target's raw memory. That is, this read bypasses the
1834 dcache, breakpoint shadowing, etc. */
1835
1836 int
1837 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1838 {
1839 /* See comment in target_read_memory about why the request starts at
1840 current_target.beneath. */
1841 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1842 myaddr, memaddr, len) == len)
1843 return 0;
1844 else
1845 return TARGET_XFER_E_IO;
1846 }
1847
1848 /* Like target_read_memory, but specify explicitly that this is a read from
1849 the target's stack. This may trigger different cache behavior. */
1850
1851 int
1852 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1853 {
1854 /* See comment in target_read_memory about why the request starts at
1855 current_target.beneath. */
1856 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1857 myaddr, memaddr, len) == len)
1858 return 0;
1859 else
1860 return TARGET_XFER_E_IO;
1861 }
1862
1863 /* Like target_read_memory, but specify explicitly that this is a read from
1864 the target's code. This may trigger different cache behavior. */
1865
1866 int
1867 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1868 {
1869 /* See comment in target_read_memory about why the request starts at
1870 current_target.beneath. */
1871 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1872 myaddr, memaddr, len) == len)
1873 return 0;
1874 else
1875 return TARGET_XFER_E_IO;
1876 }
1877
1878 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1879 Returns either 0 for success or TARGET_XFER_E_IO if any
1880 error occurs. If an error occurs, no guarantee is made about how
1881 much data got written. Callers that can deal with partial writes
1882 should call target_write. */
1883
1884 int
1885 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1886 {
1887 /* See comment in target_read_memory about why the request starts at
1888 current_target.beneath. */
1889 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1890 myaddr, memaddr, len) == len)
1891 return 0;
1892 else
1893 return TARGET_XFER_E_IO;
1894 }
1895
1896 /* Write LEN bytes from MYADDR to target raw memory at address
1897 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1898 if any error occurs. If an error occurs, no guarantee is made
1899 about how much data got written. Callers that can deal with
1900 partial writes should call target_write. */
1901
1902 int
1903 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1904 {
1905 /* See comment in target_read_memory about why the request starts at
1906 current_target.beneath. */
1907 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1908 myaddr, memaddr, len) == len)
1909 return 0;
1910 else
1911 return TARGET_XFER_E_IO;
1912 }
1913
/* Fetch the target's memory map.  Returns a sorted, numbered vector
   of regions, or NULL if no target provides a map or the map is
   malformed (overlapping regions).  Caller owns the vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Delegate to the first target in the stack implementing
     to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort regions by address so that overlap detection below is a
     single linear pass.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  /* Overlapping map is unusable; discard it entirely.  */
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1961
1962 void
1963 target_flash_erase (ULONGEST address, LONGEST length)
1964 {
1965 struct target_ops *t;
1966
1967 for (t = current_target.beneath; t != NULL; t = t->beneath)
1968 if (t->to_flash_erase != NULL)
1969 {
1970 if (targetdebug)
1971 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1972 hex_string (address), phex (length, 0));
1973 t->to_flash_erase (t, address, length);
1974 return;
1975 }
1976
1977 tcomplain ();
1978 }
1979
1980 void
1981 target_flash_done (void)
1982 {
1983 struct target_ops *t;
1984
1985 for (t = current_target.beneath; t != NULL; t = t->beneath)
1986 if (t->to_flash_done != NULL)
1987 {
1988 if (targetdebug)
1989 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1990 t->to_flash_done (t);
1991 return;
1992 }
1993
1994 tcomplain ();
1995 }
1996
/* "show trust-readonly-sections" command callback: print the current
   setting VALUE to FILE.  */
static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
2005
2006 /* More generic transfers. */
2007
2008 static enum target_xfer_status
2009 default_xfer_partial (struct target_ops *ops, enum target_object object,
2010 const char *annex, gdb_byte *readbuf,
2011 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2012 ULONGEST *xfered_len)
2013 {
2014 if (object == TARGET_OBJECT_MEMORY
2015 && ops->deprecated_xfer_memory != NULL)
2016 /* If available, fall back to the target's
2017 "deprecated_xfer_memory" method. */
2018 {
2019 int xfered = -1;
2020
2021 errno = 0;
2022 if (writebuf != NULL)
2023 {
2024 void *buffer = xmalloc (len);
2025 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2026
2027 memcpy (buffer, writebuf, len);
2028 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2029 1/*write*/, NULL, ops);
2030 do_cleanups (cleanup);
2031 }
2032 if (readbuf != NULL)
2033 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2034 0/*read*/, NULL, ops);
2035 if (xfered > 0)
2036 {
2037 *xfered_len = (ULONGEST) xfered;
2038 return TARGET_XFER_E_IO;
2039 }
2040 else if (xfered == 0 && errno == 0)
2041 /* "deprecated_xfer_memory" uses 0, cross checked against
2042 ERRNO as one indication of an error. */
2043 return TARGET_XFER_EOF;
2044 else
2045 return TARGET_XFER_E_IO;
2046 }
2047 else
2048 {
2049 gdb_assert (ops->beneath != NULL);
2050 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2051 readbuf, writebuf, offset, len,
2052 xfered_len);
2053 }
2054 }
2055
/* Target vector read/write partial wrapper functions.  */

/* Read wrapper: a single partial read is a target_xfer_partial with
   a NULL write buffer.  */
static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
2068
2069 static enum target_xfer_status
2070 target_write_partial (struct target_ops *ops,
2071 enum target_object object,
2072 const char *annex, const gdb_byte *buf,
2073 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2074 {
2075 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2076 xfered_len);
2077 }
2078
2079 /* Wrappers to perform the full transfer. */
2080
2081 /* For docs on target_read see target.h. */
2082
2083 LONGEST
2084 target_read (struct target_ops *ops,
2085 enum target_object object,
2086 const char *annex, gdb_byte *buf,
2087 ULONGEST offset, LONGEST len)
2088 {
2089 LONGEST xfered = 0;
2090
2091 while (xfered < len)
2092 {
2093 ULONGEST xfered_len;
2094 enum target_xfer_status status;
2095
2096 status = target_read_partial (ops, object, annex,
2097 (gdb_byte *) buf + xfered,
2098 offset + xfered, len - xfered,
2099 &xfered_len);
2100
2101 /* Call an observer, notifying them of the xfer progress? */
2102 if (status == TARGET_XFER_EOF)
2103 return xfered;
2104 else if (status == TARGET_XFER_OK)
2105 {
2106 xfered += xfered_len;
2107 QUIT;
2108 }
2109 else
2110 return -1;
2111
2112 }
2113 return len;
2114 }
2115
2116 /* Assuming that the entire [begin, end) range of memory cannot be
2117 read, try to read whatever subrange is possible to read.
2118
2119 The function returns, in RESULT, either zero or one memory block.
2120 If there's a readable subrange at the beginning, it is completely
2121 read and returned. Any further readable subrange will not be read.
2122 Otherwise, if there's a readable subrange at the end, it will be
2123 completely read and returned. Any readable subranges before it
   (obviously, not starting at the beginning) will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that are neither at the beginning nor at the end -- nothing is returned.
2127
2128 The purpose of this function is to handle a read across a boundary
2129 of accessible memory in a case when memory map is not available.
2130 The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
2134 recommended for this case -- and target_read_memory_robust will
2135 take care of reading multiple ranges then. */
2136
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* First byte readable: grow a readable prefix forward.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* Last byte readable: grow a readable suffix backward.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither boundary byte is readable; nothing to return.  */
      xfree (buf);
      return;
    }

  /* Binary-search for the readable/unreadable boundary.
     Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  /* Scanning backward: the half adjacent to the known-readable
	     suffix is tried first.  */
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  Hand the whole
	 buffer to the caller; R.END records how much of it is valid.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy the valid
	 suffix into a right-sized buffer and release the scratch one.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2250
2251 void
2252 free_memory_read_result_vector (void *x)
2253 {
2254 VEC(memory_read_result_s) *v = x;
2255 memory_read_result_s *current;
2256 int ix;
2257
2258 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2259 {
2260 xfree (current->data);
2261 }
2262 VEC_free (memory_read_result_s, v);
2263 }
2264
/* Read LEN bytes of memory starting at OFFSET, collecting every
   readable block into the returned vector.  Unreadable stretches are
   skipped.  Each element owns its data buffer; free the result with
   free_memory_read_result_vector.  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	/* A zero upper bound means the region extends to the end of
	   the address space.  */
	rlen = len - xfered;
      else
	/* NOTE(review): this looks like it should be
	   region->hi - (offset + xfered); as written it overstates the
	   room left in the region once XFERED > 0.  The min () below
	   usually masks this, but a read could cross a region
	   boundary -- confirm before changing.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; record the block (BUFFER's
		 ownership passes to the vector element).  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2323
2324
2325 /* An alternative to target_write with progress callbacks. */
2326
2327 LONGEST
2328 target_write_with_progress (struct target_ops *ops,
2329 enum target_object object,
2330 const char *annex, const gdb_byte *buf,
2331 ULONGEST offset, LONGEST len,
2332 void (*progress) (ULONGEST, void *), void *baton)
2333 {
2334 LONGEST xfered = 0;
2335
2336 /* Give the progress callback a chance to set up. */
2337 if (progress)
2338 (*progress) (0, baton);
2339
2340 while (xfered < len)
2341 {
2342 ULONGEST xfered_len;
2343 enum target_xfer_status status;
2344
2345 status = target_write_partial (ops, object, annex,
2346 (gdb_byte *) buf + xfered,
2347 offset + xfered, len - xfered,
2348 &xfered_len);
2349
2350 if (status == TARGET_XFER_EOF)
2351 return xfered;
2352 if (TARGET_XFER_STATUS_ERROR_P (status))
2353 return -1;
2354
2355 gdb_assert (status == TARGET_XFER_OK);
2356 if (progress)
2357 (*progress) (xfered_len, baton);
2358
2359 xfered += xfered_len;
2360 QUIT;
2361 }
2362 return len;
2363 }
2364
2365 /* For docs on target_write see target.h. */
2366
2367 LONGEST
2368 target_write (struct target_ops *ops,
2369 enum target_object object,
2370 const char *annex, const gdb_byte *buf,
2371 ULONGEST offset, LONGEST len)
2372 {
2373 return target_write_with_progress (ops, object, annex, buf, offset, len,
2374 NULL, NULL);
2375 }
2376
2377 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2378 the size of the transferred data. PADDING additional bytes are
2379 available in *BUF_P. This is a helper function for
2380 target_read_alloc; see the declaration of that function for more
2381 information. */
2382
2383 static LONGEST
2384 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2385 const char *annex, gdb_byte **buf_p, int padding)
2386 {
2387 size_t buf_alloc, buf_pos;
2388 gdb_byte *buf;
2389
2390 /* This function does not have a length parameter; it reads the
2391 entire OBJECT). Also, it doesn't support objects fetched partly
2392 from one target and partly from another (in a different stratum,
2393 e.g. a core file and an executable). Both reasons make it
2394 unsuitable for reading memory. */
2395 gdb_assert (object != TARGET_OBJECT_MEMORY);
2396
2397 /* Start by reading up to 4K at a time. The target will throttle
2398 this number down if necessary. */
2399 buf_alloc = 4096;
2400 buf = xmalloc (buf_alloc);
2401 buf_pos = 0;
2402 while (1)
2403 {
2404 ULONGEST xfered_len;
2405 enum target_xfer_status status;
2406
2407 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2408 buf_pos, buf_alloc - buf_pos - padding,
2409 &xfered_len);
2410
2411 if (status == TARGET_XFER_EOF)
2412 {
2413 /* Read all there was. */
2414 if (buf_pos == 0)
2415 xfree (buf);
2416 else
2417 *buf_p = buf;
2418 return buf_pos;
2419 }
2420 else if (status != TARGET_XFER_OK)
2421 {
2422 /* An error occurred. */
2423 xfree (buf);
2424 return TARGET_XFER_E_IO;
2425 }
2426
2427 buf_pos += xfered_len;
2428
2429 /* If the buffer is filling up, expand it. */
2430 if (buf_alloc < buf_pos * 2)
2431 {
2432 buf_alloc *= 2;
2433 buf = xrealloc (buf, buf_alloc);
2434 }
2435
2436 QUIT;
2437 }
2438 }
2439
2440 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2441 the size of the transferred data. See the declaration in "target.h"
2442 function for more information about the return value. */
2443
2444 LONGEST
2445 target_read_alloc (struct target_ops *ops, enum target_object object,
2446 const char *annex, gdb_byte **buf_p)
2447 {
2448 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2449 }
2450
2451 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2452 returned as a string, allocated using xmalloc. If an error occurs
2453 or the transfer is unsupported, NULL is returned. Empty objects
2454 are returned as allocated but empty strings. A warning is issued
2455 if the result contains any embedded NUL bytes. */
2456
2457 char *
2458 target_read_stralloc (struct target_ops *ops, enum target_object object,
2459 const char *annex)
2460 {
2461 gdb_byte *buffer;
2462 char *bufstr;
2463 LONGEST i, transferred;
2464
2465 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2466 bufstr = (char *) buffer;
2467
2468 if (transferred < 0)
2469 return NULL;
2470
2471 if (transferred == 0)
2472 return xstrdup ("");
2473
2474 bufstr[transferred] = 0;
2475
2476 /* Check for embedded NUL bytes; but allow trailing NULs. */
2477 for (i = strlen (bufstr); i < transferred; i++)
2478 if (bufstr[i] != 0)
2479 {
2480 warning (_("target object %d, annex %s, "
2481 "contained unexpected null characters"),
2482 (int) object, annex ? annex : "(none)");
2483 break;
2484 }
2485
2486 return bufstr;
2487 }
2488
2489 /* Memory transfer methods. */
2490
2491 void
2492 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2493 LONGEST len)
2494 {
2495 /* This method is used to read from an alternate, non-current
2496 target. This read must bypass the overlay support (as symbols
2497 don't match this target), and GDB's internal cache (wrong cache
2498 for this target). */
2499 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2500 != len)
2501 memory_error (TARGET_XFER_E_IO, addr);
2502 }
2503
2504 ULONGEST
2505 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2506 int len, enum bfd_endian byte_order)
2507 {
2508 gdb_byte buf[sizeof (ULONGEST)];
2509
2510 gdb_assert (len <= sizeof (buf));
2511 get_target_memory (ops, addr, buf, len);
2512 return extract_unsigned_integer (buf, len, byte_order);
2513 }
2514
2515 /* See target.h. */
2516
2517 int
2518 target_insert_breakpoint (struct gdbarch *gdbarch,
2519 struct bp_target_info *bp_tgt)
2520 {
2521 if (!may_insert_breakpoints)
2522 {
2523 warning (_("May not insert breakpoints"));
2524 return 1;
2525 }
2526
2527 return current_target.to_insert_breakpoint (&current_target,
2528 gdbarch, bp_tgt);
2529 }
2530
2531 /* See target.h. */
2532
2533 int
2534 target_remove_breakpoint (struct gdbarch *gdbarch,
2535 struct bp_target_info *bp_tgt)
2536 {
2537 /* This is kind of a weird case to handle, but the permission might
2538 have been changed after breakpoints were inserted - in which case
2539 we should just take the user literally and assume that any
2540 breakpoints should be left in place. */
2541 if (!may_insert_breakpoints)
2542 {
2543 warning (_("May not remove breakpoints"));
2544 return 1;
2545 }
2546
2547 return current_target.to_remove_breakpoint (&current_target,
2548 gdbarch, bp_tgt);
2549 }
2550
2551 static void
2552 target_info (char *args, int from_tty)
2553 {
2554 struct target_ops *t;
2555 int has_all_mem = 0;
2556
2557 if (symfile_objfile != NULL)
2558 printf_unfiltered (_("Symbols from \"%s\".\n"),
2559 objfile_name (symfile_objfile));
2560
2561 for (t = target_stack; t != NULL; t = t->beneath)
2562 {
2563 if (!(*t->to_has_memory) (t))
2564 continue;
2565
2566 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2567 continue;
2568 if (has_all_mem)
2569 printf_unfiltered (_("\tWhile running this, "
2570 "GDB does not access memory from...\n"));
2571 printf_unfiltered ("%s:\n", t->to_longname);
2572 (t->to_files_info) (t);
2573 has_all_mem = (*t->to_has_all_memory) (t);
2574 }
2575 }
2576
2577 /* This function is called before any new inferior is created, e.g.
2578 by running a program, attaching, or connecting to a target.
2579 It cleans up any state from previous invocations which might
2580 change between runs. This is a subset of what target_preopen
2581 resets (things which might change between targets). */
2582
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  Only discard them when the solib
     list is per-inferior.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Agent (in-process tracing) capabilities must be re-probed against
     the new inferior.  */
  agent_capability_invalidate ();
}
2617
2618 /* Callback for iterate_over_inferiors. Gets rid of the given
2619 inferior. */
2620
2621 static int
2622 dispose_inferior (struct inferior *inf, void *args)
2623 {
2624 struct thread_info *thread;
2625
2626 thread = any_thread_of_process (inf->pid);
2627 if (thread)
2628 {
2629 switch_to_thread (thread->ptid);
2630
2631 /* Core inferiors actually should be detached, not killed. */
2632 if (target_has_execution)
2633 target_kill ();
2634 else
2635 target_detach (NULL, 0);
2636 }
2637
2638 return 0;
2639 }
2640
2641 /* This is to be called by the open routine before it does
2642 anything. */
2643
void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* Get rid of any existing inferiors first (killing or detaching as
     appropriate); interactively, confirm before destroying a live
     process.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already. Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  /* Reset per-inferior state (solibs, memory regions, target
     description) that must not leak into the new target.  */
  target_pre_inferior (from_tty);
}
2667
2668 /* Detach a target after doing deferred register stores. */
2669
2670 void
2671 target_detach (const char *args, int from_tty)
2672 {
2673 struct target_ops* t;
2674
2675 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2676 /* Don't remove global breakpoints here. They're removed on
2677 disconnection from the target. */
2678 ;
2679 else
2680 /* If we're in breakpoints-always-inserted mode, have to remove
2681 them before detaching. */
2682 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2683
2684 prepare_for_detach ();
2685
2686 current_target.to_detach (&current_target, args, from_tty);
2687 if (targetdebug)
2688 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2689 args, from_tty);
2690 }
2691
2692 void
2693 target_disconnect (char *args, int from_tty)
2694 {
2695 struct target_ops *t;
2696
2697 /* If we're in breakpoints-always-inserted mode or if breakpoints
2698 are global across processes, we have to remove them before
2699 disconnecting. */
2700 remove_breakpoints ();
2701
2702 for (t = current_target.beneath; t != NULL; t = t->beneath)
2703 if (t->to_disconnect != NULL)
2704 {
2705 if (targetdebug)
2706 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2707 args, from_tty);
2708 t->to_disconnect (t, args, from_tty);
2709 return;
2710 }
2711
2712 tcomplain ();
2713 }
2714
2715 ptid_t
2716 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2717 {
2718 struct target_ops *t;
2719 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2720 status, options);
2721
2722 if (targetdebug)
2723 {
2724 char *status_string;
2725 char *options_string;
2726
2727 status_string = target_waitstatus_to_string (status);
2728 options_string = target_options_to_string (options);
2729 fprintf_unfiltered (gdb_stdlog,
2730 "target_wait (%d, status, options={%s})"
2731 " = %d, %s\n",
2732 ptid_get_pid (ptid), options_string,
2733 ptid_get_pid (retval), status_string);
2734 xfree (status_string);
2735 xfree (options_string);
2736 }
2737
2738 return retval;
2739 }
2740
2741 char *
2742 target_pid_to_str (ptid_t ptid)
2743 {
2744 struct target_ops *t;
2745
2746 for (t = current_target.beneath; t != NULL; t = t->beneath)
2747 {
2748 if (t->to_pid_to_str != NULL)
2749 return (*t->to_pid_to_str) (t, ptid);
2750 }
2751
2752 return normal_pid_to_str (ptid);
2753 }
2754
2755 char *
2756 target_thread_name (struct thread_info *info)
2757 {
2758 struct target_ops *t;
2759
2760 for (t = current_target.beneath; t != NULL; t = t->beneath)
2761 {
2762 if (t->to_thread_name != NULL)
2763 return (*t->to_thread_name) (t, info);
2764 }
2765
2766 return NULL;
2767 }
2768
2769 void
2770 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2771 {
2772 struct target_ops *t;
2773
2774 target_dcache_invalidate ();
2775
2776 current_target.to_resume (&current_target, ptid, step, signal);
2777 if (targetdebug)
2778 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2779 ptid_get_pid (ptid),
2780 step ? "step" : "continue",
2781 gdb_signal_to_name (signal));
2782
2783 registers_changed_ptid (ptid);
2784 set_executing (ptid, 1);
2785 set_running (ptid, 1);
2786 clear_inline_frame_state (ptid);
2787 }
2788
2789 void
2790 target_pass_signals (int numsigs, unsigned char *pass_signals)
2791 {
2792 struct target_ops *t;
2793
2794 for (t = current_target.beneath; t != NULL; t = t->beneath)
2795 {
2796 if (t->to_pass_signals != NULL)
2797 {
2798 if (targetdebug)
2799 {
2800 int i;
2801
2802 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2803 numsigs);
2804
2805 for (i = 0; i < numsigs; i++)
2806 if (pass_signals[i])
2807 fprintf_unfiltered (gdb_stdlog, " %s",
2808 gdb_signal_to_name (i));
2809
2810 fprintf_unfiltered (gdb_stdlog, " })\n");
2811 }
2812
2813 (*t->to_pass_signals) (t, numsigs, pass_signals);
2814 return;
2815 }
2816 }
2817 }
2818
2819 void
2820 target_program_signals (int numsigs, unsigned char *program_signals)
2821 {
2822 struct target_ops *t;
2823
2824 for (t = current_target.beneath; t != NULL; t = t->beneath)
2825 {
2826 if (t->to_program_signals != NULL)
2827 {
2828 if (targetdebug)
2829 {
2830 int i;
2831
2832 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2833 numsigs);
2834
2835 for (i = 0; i < numsigs; i++)
2836 if (program_signals[i])
2837 fprintf_unfiltered (gdb_stdlog, " %s",
2838 gdb_signal_to_name (i));
2839
2840 fprintf_unfiltered (gdb_stdlog, " })\n");
2841 }
2842
2843 (*t->to_program_signals) (t, numsigs, program_signals);
2844 return;
2845 }
2846 }
2847 }
2848
2849 /* Look through the list of possible targets for a target that can
2850 follow forks. */
2851
2852 int
2853 target_follow_fork (int follow_child, int detach_fork)
2854 {
2855 struct target_ops *t;
2856
2857 for (t = current_target.beneath; t != NULL; t = t->beneath)
2858 {
2859 if (t->to_follow_fork != NULL)
2860 {
2861 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2862
2863 if (targetdebug)
2864 fprintf_unfiltered (gdb_stdlog,
2865 "target_follow_fork (%d, %d) = %d\n",
2866 follow_child, detach_fork, retval);
2867 return retval;
2868 }
2869 }
2870
2871 /* Some target returned a fork event, but did not know how to follow it. */
2872 internal_error (__FILE__, __LINE__,
2873 _("could not find a target to follow fork"));
2874 }
2875
2876 void
2877 target_mourn_inferior (void)
2878 {
2879 struct target_ops *t;
2880
2881 for (t = current_target.beneath; t != NULL; t = t->beneath)
2882 {
2883 if (t->to_mourn_inferior != NULL)
2884 {
2885 t->to_mourn_inferior (t);
2886 if (targetdebug)
2887 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2888
2889 /* We no longer need to keep handles on any of the object files.
2890 Make sure to release them to avoid unnecessarily locking any
2891 of them while we're not actually debugging. */
2892 bfd_cache_close_all ();
2893
2894 return;
2895 }
2896 }
2897
2898 internal_error (__FILE__, __LINE__,
2899 _("could not find a target to follow mourn inferior"));
2900 }
2901
2902 /* Look for a target which can describe architectural features, starting
2903 from TARGET. If we find one, return its description. */
2904
2905 const struct target_desc *
2906 target_read_description (struct target_ops *target)
2907 {
2908 struct target_ops *t;
2909
2910 for (t = target; t != NULL; t = t->beneath)
2911 if (t->to_read_description != NULL)
2912 {
2913 const struct target_desc *tdesc;
2914
2915 tdesc = t->to_read_description (t);
2916 if (tdesc)
2917 return tdesc;
2918 }
2919
2920 return NULL;
2921 }
2922
2923 /* The default implementation of to_search_memory.
2924 This implements a basic search of memory, reading target memory and
2925 performing the search here (as opposed to performing the search in on the
2926 target side with, for example, gdbserver). */
2927
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Over-allocate by PATTERN_LEN - 1 bytes so a match straddling two
     chunks is still found by a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so a huge request degrades into an
     error message instead of aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
3030
3031 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3032 sequence of bytes in PATTERN with length PATTERN_LEN.
3033
3034 The result is 1 if found, 0 if not found, and -1 if there was an error
3035 requiring halting of the search (e.g. memory read error).
3036 If the pattern is found the address is recorded in FOUND_ADDRP. */
3037
3038 int
3039 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3040 const gdb_byte *pattern, ULONGEST pattern_len,
3041 CORE_ADDR *found_addrp)
3042 {
3043 struct target_ops *t;
3044 int found;
3045
3046 /* We don't use INHERIT to set current_target.to_search_memory,
3047 so we have to scan the target stack and handle targetdebug
3048 ourselves. */
3049
3050 if (targetdebug)
3051 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3052 hex_string (start_addr));
3053
3054 for (t = current_target.beneath; t != NULL; t = t->beneath)
3055 if (t->to_search_memory != NULL)
3056 break;
3057
3058 if (t != NULL)
3059 {
3060 found = t->to_search_memory (t, start_addr, search_space_len,
3061 pattern, pattern_len, found_addrp);
3062 }
3063 else
3064 {
3065 /* If a special version of to_search_memory isn't available, use the
3066 simple version. */
3067 found = simple_search_memory (current_target.beneath,
3068 start_addr, search_space_len,
3069 pattern, pattern_len, found_addrp);
3070 }
3071
3072 if (targetdebug)
3073 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3074
3075 return found;
3076 }
3077
3078 /* Look through the currently pushed targets. If none of them will
3079 be able to restart the currently running process, issue an error
3080 message. */
3081
3082 void
3083 target_require_runnable (void)
3084 {
3085 struct target_ops *t;
3086
3087 for (t = target_stack; t != NULL; t = t->beneath)
3088 {
3089 /* If this target knows how to create a new program, then
3090 assume we will still be able to after killing the current
3091 one. Either killing and mourning will not pop T, or else
3092 find_default_run_target will find it again. */
3093 if (t->to_create_inferior != NULL)
3094 return;
3095
3096 /* Do not worry about thread_stratum targets that can not
3097 create inferiors. Assume they will be pushed again if
3098 necessary, and continue to the process_stratum. */
3099 if (t->to_stratum == thread_stratum
3100 || t->to_stratum == arch_stratum)
3101 continue;
3102
3103 error (_("The \"%s\" target does not support \"run\". "
3104 "Try \"help target\" or \"continue\"."),
3105 t->to_shortname);
3106 }
3107
3108 /* This function is only called if the target is running. In that
3109 case there should have been a process_stratum target and it
3110 should either know how to create inferiors, or not... */
3111 internal_error (__FILE__, __LINE__, _("No targets found"));
3112 }
3113
3114 /* Look through the list of possible targets for a target that can
3115 execute a run or attach command without any other data. This is
3116 used to locate the default process stratum.
3117
3118 If DO_MESG is not NULL, the result is always valid (error() is
3119 called for errors); else, return NULL on error. */
3120
3121 static struct target_ops *
3122 find_default_run_target (char *do_mesg)
3123 {
3124 struct target_ops **t;
3125 struct target_ops *runable = NULL;
3126 int count;
3127
3128 count = 0;
3129
3130 for (t = target_structs; t < target_structs + target_struct_size;
3131 ++t)
3132 {
3133 if ((*t)->to_can_run && target_can_run (*t))
3134 {
3135 runable = *t;
3136 ++count;
3137 }
3138 }
3139
3140 if (count != 1)
3141 {
3142 if (do_mesg)
3143 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3144 else
3145 return NULL;
3146 }
3147
3148 return runable;
3149 }
3150
3151 void
3152 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3153 {
3154 struct target_ops *t;
3155
3156 t = find_default_run_target ("attach");
3157 (t->to_attach) (t, args, from_tty);
3158 return;
3159 }
3160
3161 void
3162 find_default_create_inferior (struct target_ops *ops,
3163 char *exec_file, char *allargs, char **env,
3164 int from_tty)
3165 {
3166 struct target_ops *t;
3167
3168 t = find_default_run_target ("run");
3169 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3170 return;
3171 }
3172
3173 static int
3174 find_default_can_async_p (struct target_ops *ignore)
3175 {
3176 struct target_ops *t;
3177
3178 /* This may be called before the target is pushed on the stack;
3179 look for the default process stratum. If there's none, gdb isn't
3180 configured with a native debugger, and target remote isn't
3181 connected yet. */
3182 t = find_default_run_target (NULL);
3183 if (t && t->to_can_async_p != delegate_can_async_p)
3184 return (t->to_can_async_p) (t);
3185 return 0;
3186 }
3187
3188 static int
3189 find_default_is_async_p (struct target_ops *ignore)
3190 {
3191 struct target_ops *t;
3192
3193 /* This may be called before the target is pushed on the stack;
3194 look for the default process stratum. If there's none, gdb isn't
3195 configured with a native debugger, and target remote isn't
3196 connected yet. */
3197 t = find_default_run_target (NULL);
3198 if (t && t->to_is_async_p != delegate_is_async_p)
3199 return (t->to_is_async_p) (t);
3200 return 0;
3201 }
3202
3203 static int
3204 find_default_supports_non_stop (struct target_ops *self)
3205 {
3206 struct target_ops *t;
3207
3208 t = find_default_run_target (NULL);
3209 if (t && t->to_supports_non_stop)
3210 return (t->to_supports_non_stop) (t);
3211 return 0;
3212 }
3213
3214 int
3215 target_supports_non_stop (void)
3216 {
3217 struct target_ops *t;
3218
3219 for (t = &current_target; t != NULL; t = t->beneath)
3220 if (t->to_supports_non_stop)
3221 return t->to_supports_non_stop (t);
3222
3223 return 0;
3224 }
3225
3226 /* Implement the "info proc" command. */
3227
3228 int
3229 target_info_proc (char *args, enum info_proc_what what)
3230 {
3231 struct target_ops *t;
3232
3233 /* If we're already connected to something that can get us OS
3234 related data, use it. Otherwise, try using the native
3235 target. */
3236 if (current_target.to_stratum >= process_stratum)
3237 t = current_target.beneath;
3238 else
3239 t = find_default_run_target (NULL);
3240
3241 for (; t != NULL; t = t->beneath)
3242 {
3243 if (t->to_info_proc != NULL)
3244 {
3245 t->to_info_proc (t, args, what);
3246
3247 if (targetdebug)
3248 fprintf_unfiltered (gdb_stdlog,
3249 "target_info_proc (\"%s\", %d)\n", args, what);
3250
3251 return 1;
3252 }
3253 }
3254
3255 return 0;
3256 }
3257
3258 static int
3259 find_default_supports_disable_randomization (struct target_ops *self)
3260 {
3261 struct target_ops *t;
3262
3263 t = find_default_run_target (NULL);
3264 if (t && t->to_supports_disable_randomization)
3265 return (t->to_supports_disable_randomization) (t);
3266 return 0;
3267 }
3268
3269 int
3270 target_supports_disable_randomization (void)
3271 {
3272 struct target_ops *t;
3273
3274 for (t = &current_target; t != NULL; t = t->beneath)
3275 if (t->to_supports_disable_randomization)
3276 return t->to_supports_disable_randomization (t);
3277
3278 return 0;
3279 }
3280
3281 char *
3282 target_get_osdata (const char *type)
3283 {
3284 struct target_ops *t;
3285
3286 /* If we're already connected to something that can get us OS
3287 related data, use it. Otherwise, try using the native
3288 target. */
3289 if (current_target.to_stratum >= process_stratum)
3290 t = current_target.beneath;
3291 else
3292 t = find_default_run_target ("get OS data");
3293
3294 if (!t)
3295 return NULL;
3296
3297 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3298 }
3299
/* Determine the current address space of thread PTID.  Asks the
   first target on the stack that implements the method; otherwise
   falls back to the inferior's main address space.  Never returns
   NULL (it raises an internal error instead).  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Delegate to the first target that implements the method.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3336
3337
3338 /* Target file operations. */
3339
3340 static struct target_ops *
3341 default_fileio_target (void)
3342 {
3343 /* If we're already connected to something that can perform
3344 file I/O, use it. Otherwise, try using the native target. */
3345 if (current_target.to_stratum >= process_stratum)
3346 return current_target.beneath;
3347 else
3348 return find_default_run_target ("file I/O");
3349 }
3350
3351 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3352 target file descriptor, or -1 if an error occurs (and set
3353 *TARGET_ERRNO). */
3354 int
3355 target_fileio_open (const char *filename, int flags, int mode,
3356 int *target_errno)
3357 {
3358 struct target_ops *t;
3359
3360 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3361 {
3362 if (t->to_fileio_open != NULL)
3363 {
3364 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3365
3366 if (targetdebug)
3367 fprintf_unfiltered (gdb_stdlog,
3368 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3369 filename, flags, mode,
3370 fd, fd != -1 ? 0 : *target_errno);
3371 return fd;
3372 }
3373 }
3374
3375 *target_errno = FILEIO_ENOSYS;
3376 return -1;
3377 }
3378
3379 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3380 Return the number of bytes written, or -1 if an error occurs
3381 (and set *TARGET_ERRNO). */
3382 int
3383 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3384 ULONGEST offset, int *target_errno)
3385 {
3386 struct target_ops *t;
3387
3388 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3389 {
3390 if (t->to_fileio_pwrite != NULL)
3391 {
3392 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3393 target_errno);
3394
3395 if (targetdebug)
3396 fprintf_unfiltered (gdb_stdlog,
3397 "target_fileio_pwrite (%d,...,%d,%s) "
3398 "= %d (%d)\n",
3399 fd, len, pulongest (offset),
3400 ret, ret != -1 ? 0 : *target_errno);
3401 return ret;
3402 }
3403 }
3404
3405 *target_errno = FILEIO_ENOSYS;
3406 return -1;
3407 }
3408
3409 /* Read up to LEN bytes FD on the target into READ_BUF.
3410 Return the number of bytes read, or -1 if an error occurs
3411 (and set *TARGET_ERRNO). */
3412 int
3413 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3414 ULONGEST offset, int *target_errno)
3415 {
3416 struct target_ops *t;
3417
3418 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3419 {
3420 if (t->to_fileio_pread != NULL)
3421 {
3422 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3423 target_errno);
3424
3425 if (targetdebug)
3426 fprintf_unfiltered (gdb_stdlog,
3427 "target_fileio_pread (%d,...,%d,%s) "
3428 "= %d (%d)\n",
3429 fd, len, pulongest (offset),
3430 ret, ret != -1 ? 0 : *target_errno);
3431 return ret;
3432 }
3433 }
3434
3435 *target_errno = FILEIO_ENOSYS;
3436 return -1;
3437 }
3438
3439 /* Close FD on the target. Return 0, or -1 if an error occurs
3440 (and set *TARGET_ERRNO). */
3441 int
3442 target_fileio_close (int fd, int *target_errno)
3443 {
3444 struct target_ops *t;
3445
3446 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3447 {
3448 if (t->to_fileio_close != NULL)
3449 {
3450 int ret = t->to_fileio_close (t, fd, target_errno);
3451
3452 if (targetdebug)
3453 fprintf_unfiltered (gdb_stdlog,
3454 "target_fileio_close (%d) = %d (%d)\n",
3455 fd, ret, ret != -1 ? 0 : *target_errno);
3456 return ret;
3457 }
3458 }
3459
3460 *target_errno = FILEIO_ENOSYS;
3461 return -1;
3462 }
3463
3464 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3465 occurs (and set *TARGET_ERRNO). */
3466 int
3467 target_fileio_unlink (const char *filename, int *target_errno)
3468 {
3469 struct target_ops *t;
3470
3471 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3472 {
3473 if (t->to_fileio_unlink != NULL)
3474 {
3475 int ret = t->to_fileio_unlink (t, filename, target_errno);
3476
3477 if (targetdebug)
3478 fprintf_unfiltered (gdb_stdlog,
3479 "target_fileio_unlink (%s) = %d (%d)\n",
3480 filename, ret, ret != -1 ? 0 : *target_errno);
3481 return ret;
3482 }
3483 }
3484
3485 *target_errno = FILEIO_ENOSYS;
3486 return -1;
3487 }
3488
3489 /* Read value of symbolic link FILENAME on the target. Return a
3490 null-terminated string allocated via xmalloc, or NULL if an error
3491 occurs (and set *TARGET_ERRNO). */
3492 char *
3493 target_fileio_readlink (const char *filename, int *target_errno)
3494 {
3495 struct target_ops *t;
3496
3497 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3498 {
3499 if (t->to_fileio_readlink != NULL)
3500 {
3501 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3502
3503 if (targetdebug)
3504 fprintf_unfiltered (gdb_stdlog,
3505 "target_fileio_readlink (%s) = %s (%d)\n",
3506 filename, ret? ret : "(nil)",
3507 ret? 0 : *target_errno);
3508 return ret;
3509 }
3510 }
3511
3512 *target_errno = FILEIO_ENOSYS;
3513 return NULL;
3514 }
3515
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;

  target_fileio_close (*(int *) opaque, &target_errno);
}
3524
3525 /* Read target file FILENAME. Store the result in *BUF_P and
3526 return the size of the transferred data. PADDING additional bytes are
3527 available in *BUF_P. This is a helper function for
3528 target_fileio_read_alloc; see the declaration of that function for more
3529 information. */
3530
3531 static LONGEST
3532 target_fileio_read_alloc_1 (const char *filename,
3533 gdb_byte **buf_p, int padding)
3534 {
3535 struct cleanup *close_cleanup;
3536 size_t buf_alloc, buf_pos;
3537 gdb_byte *buf;
3538 LONGEST n;
3539 int fd;
3540 int target_errno;
3541
3542 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3543 if (fd == -1)
3544 return -1;
3545
3546 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3547
3548 /* Start by reading up to 4K at a time. The target will throttle
3549 this number down if necessary. */
3550 buf_alloc = 4096;
3551 buf = xmalloc (buf_alloc);
3552 buf_pos = 0;
3553 while (1)
3554 {
3555 n = target_fileio_pread (fd, &buf[buf_pos],
3556 buf_alloc - buf_pos - padding, buf_pos,
3557 &target_errno);
3558 if (n < 0)
3559 {
3560 /* An error occurred. */
3561 do_cleanups (close_cleanup);
3562 xfree (buf);
3563 return -1;
3564 }
3565 else if (n == 0)
3566 {
3567 /* Read all there was. */
3568 do_cleanups (close_cleanup);
3569 if (buf_pos == 0)
3570 xfree (buf);
3571 else
3572 *buf_p = buf;
3573 return buf_pos;
3574 }
3575
3576 buf_pos += n;
3577
3578 /* If the buffer is filling up, expand it. */
3579 if (buf_alloc < buf_pos * 2)
3580 {
3581 buf_alloc *= 2;
3582 buf = xrealloc (buf, buf_alloc);
3583 }
3584
3585 QUIT;
3586 }
3587 }
3588
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Raw read: no padding byte is needed.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3598
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding; it provides room for the
     terminating NUL stored below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  /* Write the NUL into the padding byte.  */
  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3635
3636
3637 static int
3638 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3639 CORE_ADDR addr, int len)
3640 {
3641 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3642 }
3643
3644 static int
3645 default_watchpoint_addr_within_range (struct target_ops *target,
3646 CORE_ADDR addr,
3647 CORE_ADDR start, int length)
3648 {
3649 return addr >= start && addr < start + length;
3650 }
3651
3652 static struct gdbarch *
3653 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3654 {
3655 return target_gdbarch ();
3656 }
3657
/* Stub that always returns 0; installed into target-vector slots of
   differing signatures via function-pointer casts (see
   init_dummy_target).  */

static int
return_zero (void)
{
  return 0;
}
3663
/* Stub that always returns 1; presumably installed into target-vector
   slots via casts like return_zero — confirm at the use sites, which
   are outside this chunk.  */

static int
return_one (void)
{
  return 1;
}
3669
/* Stub that always returns -1; presumably installed into target-vector
   slots via casts like return_zero — confirm at the use sites, which
   are outside this chunk.  */

static int
return_minus_one (void)
{
  return -1;
}
3675
/* Stub that always returns a null pointer; presumably installed into
   target-vector slots via casts — confirm at the use sites, which are
   outside this chunk.  */

static void *
return_null (void)
{
  return 0;
}
3681
/* Find the next target down the stack from the specified target T.
   May return NULL when T is at the bottom of the stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3691
3692 /* See target.h. */
3693
3694 struct target_ops *
3695 find_target_at (enum strata stratum)
3696 {
3697 struct target_ops *t;
3698
3699 for (t = current_target.beneath; t != NULL; t = t->beneath)
3700 if (t->to_stratum == stratum)
3701 return t;
3702
3703 return NULL;
3704 }
3705
3706 \f
/* The inferior process has died.  Long live the inferior!  Reset all
   per-inferior state: threads, breakpoints, registers, frame cache.
   The order of the calls below matters; see the inline comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Capture the dying ptid, then forget it globally before tearing
     anything down.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Any cached register values belonged to the dead process.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3741 \f
3742 /* Convert a normal process ID to a string. Returns the string in a
3743 static buffer. */
3744
3745 char *
3746 normal_pid_to_str (ptid_t ptid)
3747 {
3748 static char buf[32];
3749
3750 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3751 return buf;
3752 }
3753
/* to_pid_to_str implementation for the dummy target: plain
   "process NNN" formatting.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3759
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Presumably not reached (error () appears to throw); the return
     quiets the compiler — confirm against error ()'s contract.  */
  return 0;
}
3768
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Presumably not reached; see dummy_find_memory_regions.  */
  return NULL;
}
3777
/* Error-catcher for target_get_bookmark.  tcomplain () does not
   return (declared ATTRIBUTE_NORETURN above), so the return value is
   never produced.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}
3785
/* Error-catcher for target_goto_bookmark.  Always raises via
   tcomplain ().  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3792
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target supplies "not implemented" behavior for
   every slot; remaining methods are filled in by
   install_dummy_methods.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The casts adapt the generic return_zero stub to each slot's
     particular signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  install_dummy_methods (&dummy_target);
}
3822 \f
3823 static void
3824 debug_to_open (char *args, int from_tty)
3825 {
3826 debug_target.to_open (args, from_tty);
3827
3828 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3829 }
3830
/* Close the target TARG, which must not be pushed on the target
   stack.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* Prefer to_xclose when provided, otherwise to_close; at most one
     of the two is invoked.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3844
/* Attach to the process described by ARGS, delegating to the current
   target vector; log the call when target debugging is enabled.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3853
3854 int
3855 target_thread_alive (ptid_t ptid)
3856 {
3857 struct target_ops *t;
3858
3859 for (t = current_target.beneath; t != NULL; t = t->beneath)
3860 {
3861 if (t->to_thread_alive != NULL)
3862 {
3863 int retval;
3864
3865 retval = t->to_thread_alive (t, ptid);
3866 if (targetdebug)
3867 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3868 ptid_get_pid (ptid), retval);
3869
3870 return retval;
3871 }
3872 }
3873
3874 return 0;
3875 }
3876
3877 void
3878 target_find_new_threads (void)
3879 {
3880 struct target_ops *t;
3881
3882 for (t = current_target.beneath; t != NULL; t = t->beneath)
3883 {
3884 if (t->to_find_new_threads != NULL)
3885 {
3886 t->to_find_new_threads (t);
3887 if (targetdebug)
3888 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3889
3890 return;
3891 }
3892 }
3893 }
3894
/* Stop (interrupt) thread PTID.  Respects the "may-stop" setting:
   when stopping is disallowed, only a warning is emitted.  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3906
/* Debug-trace wrapper for to_post_attach: forward to the real
   target, then log the call.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3914
3915 /* Concatenate ELEM to LIST, a comma separate list, and return the
3916 result. The LIST incoming argument is released. */
3917
3918 static char *
3919 str_comma_list_concat_elem (char *list, const char *elem)
3920 {
3921 if (list == NULL)
3922 return xstrdup (elem);
3923 else
3924 return reconcat (list, list, ", ", elem, (char *) NULL);
3925 }
3926
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int is_set = (*target_options & opt) != 0;

  if (is_set)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3944
/* Render TARGET_OPTIONS (a mask of TARGET_* flags) as a
   comma-separated string, e.g. "TARGET_WNOHANG".  The caller owns
   the returned xmalloc'd string.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* The macro stringizes the flag name and clears its bit from
     target_options as a side effect of do_option.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits still set were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3962
/* Log register REGNO of REGCACHE to gdb_stdlog, prefixed with FUNC.
   Used by the debug wrappers around register fetch/store.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when one is available, else its
     number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Dump the raw bytes.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* If the value fits in a LONGEST, also print it as hex and
	 decimal.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3999
4000 void
4001 target_fetch_registers (struct regcache *regcache, int regno)
4002 {
4003 struct target_ops *t;
4004
4005 for (t = current_target.beneath; t != NULL; t = t->beneath)
4006 {
4007 if (t->to_fetch_registers != NULL)
4008 {
4009 t->to_fetch_registers (t, regcache, regno);
4010 if (targetdebug)
4011 debug_print_register ("target_fetch_registers", regcache, regno);
4012 return;
4013 }
4014 }
4015 }
4016
4017 void
4018 target_store_registers (struct regcache *regcache, int regno)
4019 {
4020 struct target_ops *t;
4021
4022 if (!may_write_registers)
4023 error (_("Writing to registers is not allowed (regno %d)"), regno);
4024
4025 current_target.to_store_registers (&current_target, regcache, regno);
4026 if (targetdebug)
4027 {
4028 debug_print_register ("target_store_registers", regcache, regno);
4029 }
4030 }
4031
4032 int
4033 target_core_of_thread (ptid_t ptid)
4034 {
4035 struct target_ops *t;
4036
4037 for (t = current_target.beneath; t != NULL; t = t->beneath)
4038 {
4039 if (t->to_core_of_thread != NULL)
4040 {
4041 int retval = t->to_core_of_thread (t, ptid);
4042
4043 if (targetdebug)
4044 fprintf_unfiltered (gdb_stdlog,
4045 "target_core_of_thread (%d) = %d\n",
4046 ptid_get_pid (ptid), retval);
4047 return retval;
4048 }
4049 }
4050
4051 return -1;
4052 }
4053
/* Verify SIZE bytes at MEMADDR against DATA via the first target on
   the stack that implements to_verify_memory.  Raises via
   tcomplain () when none does; tcomplain is ATTRIBUTE_NORETURN
   (declared above), so falling off the end here is unreachable.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  tcomplain ();
}
4077
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements masked
     watchpoints.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  /* No target supports it; report failure (nonzero).  */
  return 1;
}
4104
/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack that implements masked
     watchpoints.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  /* No target supports it; report failure (nonzero).  */
  return 1;
}
4131
4132 /* The documentation for this function is in its prototype declaration
4133 in target.h. */
4134
4135 int
4136 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4137 {
4138 struct target_ops *t;
4139
4140 for (t = current_target.beneath; t != NULL; t = t->beneath)
4141 if (t->to_masked_watch_num_registers != NULL)
4142 return t->to_masked_watch_num_registers (t, addr, mask);
4143
4144 return -1;
4145 }
4146
4147 /* The documentation for this function is in its prototype declaration
4148 in target.h. */
4149
4150 int
4151 target_ranged_break_num_registers (void)
4152 {
4153 struct target_ops *t;
4154
4155 for (t = current_target.beneath; t != NULL; t = t->beneath)
4156 if (t->to_ranged_break_num_registers != NULL)
4157 return t->to_ranged_break_num_registers (t);
4158
4159 return -1;
4160 }
4161
4162 /* See target.h. */
4163
4164 struct btrace_target_info *
4165 target_enable_btrace (ptid_t ptid)
4166 {
4167 struct target_ops *t;
4168
4169 for (t = current_target.beneath; t != NULL; t = t->beneath)
4170 if (t->to_enable_btrace != NULL)
4171 return t->to_enable_btrace (t, ptid);
4172
4173 tcomplain ();
4174 return NULL;
4175 }
4176
4177 /* See target.h. */
4178
4179 void
4180 target_disable_btrace (struct btrace_target_info *btinfo)
4181 {
4182 struct target_ops *t;
4183
4184 for (t = current_target.beneath; t != NULL; t = t->beneath)
4185 if (t->to_disable_btrace != NULL)
4186 {
4187 t->to_disable_btrace (t, btinfo);
4188 return;
4189 }
4190
4191 tcomplain ();
4192 }
4193
4194 /* See target.h. */
4195
4196 void
4197 target_teardown_btrace (struct btrace_target_info *btinfo)
4198 {
4199 struct target_ops *t;
4200
4201 for (t = current_target.beneath; t != NULL; t = t->beneath)
4202 if (t->to_teardown_btrace != NULL)
4203 {
4204 t->to_teardown_btrace (t, btinfo);
4205 return;
4206 }
4207
4208 tcomplain ();
4209 }
4210
4211 /* See target.h. */
4212
4213 enum btrace_error
4214 target_read_btrace (VEC (btrace_block_s) **btrace,
4215 struct btrace_target_info *btinfo,
4216 enum btrace_read_type type)
4217 {
4218 struct target_ops *t;
4219
4220 for (t = current_target.beneath; t != NULL; t = t->beneath)
4221 if (t->to_read_btrace != NULL)
4222 return t->to_read_btrace (t, btrace, btinfo, type);
4223
4224 tcomplain ();
4225 return BTRACE_ERR_NOT_SUPPORTED;
4226 }
4227
4228 /* See target.h. */
4229
4230 void
4231 target_stop_recording (void)
4232 {
4233 struct target_ops *t;
4234
4235 for (t = current_target.beneath; t != NULL; t = t->beneath)
4236 if (t->to_stop_recording != NULL)
4237 {
4238 t->to_stop_recording (t);
4239 return;
4240 }
4241
4242 /* This is optional. */
4243 }
4244
4245 /* See target.h. */
4246
4247 void
4248 target_info_record (void)
4249 {
4250 struct target_ops *t;
4251
4252 for (t = current_target.beneath; t != NULL; t = t->beneath)
4253 if (t->to_info_record != NULL)
4254 {
4255 t->to_info_record (t);
4256 return;
4257 }
4258
4259 tcomplain ();
4260 }
4261
4262 /* See target.h. */
4263
4264 void
4265 target_save_record (const char *filename)
4266 {
4267 struct target_ops *t;
4268
4269 for (t = current_target.beneath; t != NULL; t = t->beneath)
4270 if (t->to_save_record != NULL)
4271 {
4272 t->to_save_record (t, filename);
4273 return;
4274 }
4275
4276 tcomplain ();
4277 }
4278
4279 /* See target.h. */
4280
4281 int
4282 target_supports_delete_record (void)
4283 {
4284 struct target_ops *t;
4285
4286 for (t = current_target.beneath; t != NULL; t = t->beneath)
4287 if (t->to_delete_record != NULL)
4288 return 1;
4289
4290 return 0;
4291 }
4292
4293 /* See target.h. */
4294
4295 void
4296 target_delete_record (void)
4297 {
4298 struct target_ops *t;
4299
4300 for (t = current_target.beneath; t != NULL; t = t->beneath)
4301 if (t->to_delete_record != NULL)
4302 {
4303 t->to_delete_record (t);
4304 return;
4305 }
4306
4307 tcomplain ();
4308 }
4309
4310 /* See target.h. */
4311
4312 int
4313 target_record_is_replaying (void)
4314 {
4315 struct target_ops *t;
4316
4317 for (t = current_target.beneath; t != NULL; t = t->beneath)
4318 if (t->to_record_is_replaying != NULL)
4319 return t->to_record_is_replaying (t);
4320
4321 return 0;
4322 }
4323
4324 /* See target.h. */
4325
4326 void
4327 target_goto_record_begin (void)
4328 {
4329 struct target_ops *t;
4330
4331 for (t = current_target.beneath; t != NULL; t = t->beneath)
4332 if (t->to_goto_record_begin != NULL)
4333 {
4334 t->to_goto_record_begin (t);
4335 return;
4336 }
4337
4338 tcomplain ();
4339 }
4340
4341 /* See target.h. */
4342
4343 void
4344 target_goto_record_end (void)
4345 {
4346 struct target_ops *t;
4347
4348 for (t = current_target.beneath; t != NULL; t = t->beneath)
4349 if (t->to_goto_record_end != NULL)
4350 {
4351 t->to_goto_record_end (t);
4352 return;
4353 }
4354
4355 tcomplain ();
4356 }
4357
4358 /* See target.h. */
4359
4360 void
4361 target_goto_record (ULONGEST insn)
4362 {
4363 struct target_ops *t;
4364
4365 for (t = current_target.beneath; t != NULL; t = t->beneath)
4366 if (t->to_goto_record != NULL)
4367 {
4368 t->to_goto_record (t, insn);
4369 return;
4370 }
4371
4372 tcomplain ();
4373 }
4374
4375 /* See target.h. */
4376
4377 void
4378 target_insn_history (int size, int flags)
4379 {
4380 struct target_ops *t;
4381
4382 for (t = current_target.beneath; t != NULL; t = t->beneath)
4383 if (t->to_insn_history != NULL)
4384 {
4385 t->to_insn_history (t, size, flags);
4386 return;
4387 }
4388
4389 tcomplain ();
4390 }
4391
4392 /* See target.h. */
4393
4394 void
4395 target_insn_history_from (ULONGEST from, int size, int flags)
4396 {
4397 struct target_ops *t;
4398
4399 for (t = current_target.beneath; t != NULL; t = t->beneath)
4400 if (t->to_insn_history_from != NULL)
4401 {
4402 t->to_insn_history_from (t, from, size, flags);
4403 return;
4404 }
4405
4406 tcomplain ();
4407 }
4408
4409 /* See target.h. */
4410
4411 void
4412 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4413 {
4414 struct target_ops *t;
4415
4416 for (t = current_target.beneath; t != NULL; t = t->beneath)
4417 if (t->to_insn_history_range != NULL)
4418 {
4419 t->to_insn_history_range (t, begin, end, flags);
4420 return;
4421 }
4422
4423 tcomplain ();
4424 }
4425
4426 /* See target.h. */
4427
4428 void
4429 target_call_history (int size, int flags)
4430 {
4431 struct target_ops *t;
4432
4433 for (t = current_target.beneath; t != NULL; t = t->beneath)
4434 if (t->to_call_history != NULL)
4435 {
4436 t->to_call_history (t, size, flags);
4437 return;
4438 }
4439
4440 tcomplain ();
4441 }
4442
4443 /* See target.h. */
4444
4445 void
4446 target_call_history_from (ULONGEST begin, int size, int flags)
4447 {
4448 struct target_ops *t;
4449
4450 for (t = current_target.beneath; t != NULL; t = t->beneath)
4451 if (t->to_call_history_from != NULL)
4452 {
4453 t->to_call_history_from (t, begin, size, flags);
4454 return;
4455 }
4456
4457 tcomplain ();
4458 }
4459
4460 /* See target.h. */
4461
4462 void
4463 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4464 {
4465 struct target_ops *t;
4466
4467 for (t = current_target.beneath; t != NULL; t = t->beneath)
4468 if (t->to_call_history_range != NULL)
4469 {
4470 t->to_call_history_range (t, begin, end, flags);
4471 return;
4472 }
4473
4474 tcomplain ();
4475 }
4476
4477 static void
4478 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4479 {
4480 debug_target.to_prepare_to_store (&debug_target, regcache);
4481
4482 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4483 }
4484
4485 /* See target.h. */
4486
4487 const struct frame_unwind *
4488 target_get_unwinder (void)
4489 {
4490 struct target_ops *t;
4491
4492 for (t = current_target.beneath; t != NULL; t = t->beneath)
4493 if (t->to_get_unwinder != NULL)
4494 return t->to_get_unwinder;
4495
4496 return NULL;
4497 }
4498
4499 /* See target.h. */
4500
4501 const struct frame_unwind *
4502 target_get_tailcall_unwinder (void)
4503 {
4504 struct target_ops *t;
4505
4506 for (t = current_target.beneath; t != NULL; t = t->beneath)
4507 if (t->to_get_tailcall_unwinder != NULL)
4508 return t->to_get_tailcall_unwinder;
4509
4510 return NULL;
4511 }
4512
4513 /* See target.h. */
4514
4515 CORE_ADDR
4516 forward_target_decr_pc_after_break (struct target_ops *ops,
4517 struct gdbarch *gdbarch)
4518 {
4519 for (; ops != NULL; ops = ops->beneath)
4520 if (ops->to_decr_pc_after_break != NULL)
4521 return ops->to_decr_pc_after_break (ops, gdbarch);
4522
4523 return gdbarch_decr_pc_after_break (gdbarch);
4524 }
4525
4526 /* See target.h. */
4527
4528 CORE_ADDR
4529 target_decr_pc_after_break (struct gdbarch *gdbarch)
4530 {
4531 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4532 }
4533
/* Logging wrapper for the deprecated xfer_memory method: forward the
   transfer to the saved target vector, then trace it (including a hex
   dump of the bytes moved) to the debug log.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* No trailing newline yet; the byte dump below may be appended to
     this line.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump onto a new line whenever the buffer
	     address of the current byte is 16-byte aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Unless verbose target debugging was requested,
		 truncate the dump after the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4574
4575 static void
4576 debug_to_files_info (struct target_ops *target)
4577 {
4578 debug_target.to_files_info (target);
4579
4580 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4581 }
4582
4583 static int
4584 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4585 struct bp_target_info *bp_tgt)
4586 {
4587 int retval;
4588
4589 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4590
4591 fprintf_unfiltered (gdb_stdlog,
4592 "target_insert_breakpoint (%s, xxx) = %ld\n",
4593 core_addr_to_string (bp_tgt->placed_address),
4594 (unsigned long) retval);
4595 return retval;
4596 }
4597
4598 static int
4599 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4600 struct bp_target_info *bp_tgt)
4601 {
4602 int retval;
4603
4604 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4605
4606 fprintf_unfiltered (gdb_stdlog,
4607 "target_remove_breakpoint (%s, xxx) = %ld\n",
4608 core_addr_to_string (bp_tgt->placed_address),
4609 (unsigned long) retval);
4610 return retval;
4611 }
4612
4613 static int
4614 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4615 int type, int cnt, int from_tty)
4616 {
4617 int retval;
4618
4619 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4620 type, cnt, from_tty);
4621
4622 fprintf_unfiltered (gdb_stdlog,
4623 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4624 (unsigned long) type,
4625 (unsigned long) cnt,
4626 (unsigned long) from_tty,
4627 (unsigned long) retval);
4628 return retval;
4629 }
4630
4631 static int
4632 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4633 CORE_ADDR addr, int len)
4634 {
4635 CORE_ADDR retval;
4636
4637 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4638 addr, len);
4639
4640 fprintf_unfiltered (gdb_stdlog,
4641 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4642 core_addr_to_string (addr), (unsigned long) len,
4643 core_addr_to_string (retval));
4644 return retval;
4645 }
4646
4647 static int
4648 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4649 CORE_ADDR addr, int len, int rw,
4650 struct expression *cond)
4651 {
4652 int retval;
4653
4654 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4655 addr, len,
4656 rw, cond);
4657
4658 fprintf_unfiltered (gdb_stdlog,
4659 "target_can_accel_watchpoint_condition "
4660 "(%s, %d, %d, %s) = %ld\n",
4661 core_addr_to_string (addr), len, rw,
4662 host_address_to_string (cond), (unsigned long) retval);
4663 return retval;
4664 }
4665
4666 static int
4667 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4668 {
4669 int retval;
4670
4671 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4672
4673 fprintf_unfiltered (gdb_stdlog,
4674 "target_stopped_by_watchpoint () = %ld\n",
4675 (unsigned long) retval);
4676 return retval;
4677 }
4678
/* Logging wrapper for to_stopped_data_address.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  /* NOTE(review): *ADDR is printed even when RETVAL is 0, in which
     case the method may not have stored anything there -- confirm
     that every implementation writes *ADDR unconditionally before
     trusting the address in this trace line.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4692
4693 static int
4694 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4695 CORE_ADDR addr,
4696 CORE_ADDR start, int length)
4697 {
4698 int retval;
4699
4700 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4701 start, length);
4702
4703 fprintf_filtered (gdb_stdlog,
4704 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4705 core_addr_to_string (addr), core_addr_to_string (start),
4706 length, retval);
4707 return retval;
4708 }
4709
4710 static int
4711 debug_to_insert_hw_breakpoint (struct target_ops *self,
4712 struct gdbarch *gdbarch,
4713 struct bp_target_info *bp_tgt)
4714 {
4715 int retval;
4716
4717 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4718 gdbarch, bp_tgt);
4719
4720 fprintf_unfiltered (gdb_stdlog,
4721 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4722 core_addr_to_string (bp_tgt->placed_address),
4723 (unsigned long) retval);
4724 return retval;
4725 }
4726
4727 static int
4728 debug_to_remove_hw_breakpoint (struct target_ops *self,
4729 struct gdbarch *gdbarch,
4730 struct bp_target_info *bp_tgt)
4731 {
4732 int retval;
4733
4734 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4735 gdbarch, bp_tgt);
4736
4737 fprintf_unfiltered (gdb_stdlog,
4738 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4739 core_addr_to_string (bp_tgt->placed_address),
4740 (unsigned long) retval);
4741 return retval;
4742 }
4743
4744 static int
4745 debug_to_insert_watchpoint (struct target_ops *self,
4746 CORE_ADDR addr, int len, int type,
4747 struct expression *cond)
4748 {
4749 int retval;
4750
4751 retval = debug_target.to_insert_watchpoint (&debug_target,
4752 addr, len, type, cond);
4753
4754 fprintf_unfiltered (gdb_stdlog,
4755 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4756 core_addr_to_string (addr), len, type,
4757 host_address_to_string (cond), (unsigned long) retval);
4758 return retval;
4759 }
4760
4761 static int
4762 debug_to_remove_watchpoint (struct target_ops *self,
4763 CORE_ADDR addr, int len, int type,
4764 struct expression *cond)
4765 {
4766 int retval;
4767
4768 retval = debug_target.to_remove_watchpoint (&debug_target,
4769 addr, len, type, cond);
4770
4771 fprintf_unfiltered (gdb_stdlog,
4772 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4773 core_addr_to_string (addr), len, type,
4774 host_address_to_string (cond), (unsigned long) retval);
4775 return retval;
4776 }
4777
4778 static void
4779 debug_to_terminal_init (struct target_ops *self)
4780 {
4781 debug_target.to_terminal_init (&debug_target);
4782
4783 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4784 }
4785
4786 static void
4787 debug_to_terminal_inferior (struct target_ops *self)
4788 {
4789 debug_target.to_terminal_inferior (&debug_target);
4790
4791 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4792 }
4793
4794 static void
4795 debug_to_terminal_ours_for_output (struct target_ops *self)
4796 {
4797 debug_target.to_terminal_ours_for_output (&debug_target);
4798
4799 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4800 }
4801
4802 static void
4803 debug_to_terminal_ours (struct target_ops *self)
4804 {
4805 debug_target.to_terminal_ours (&debug_target);
4806
4807 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4808 }
4809
4810 static void
4811 debug_to_terminal_save_ours (struct target_ops *self)
4812 {
4813 debug_target.to_terminal_save_ours (&debug_target);
4814
4815 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4816 }
4817
4818 static void
4819 debug_to_terminal_info (struct target_ops *self,
4820 const char *arg, int from_tty)
4821 {
4822 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4823
4824 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4825 from_tty);
4826 }
4827
4828 static void
4829 debug_to_load (struct target_ops *self, char *args, int from_tty)
4830 {
4831 debug_target.to_load (&debug_target, args, from_tty);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4834 }
4835
4836 static void
4837 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4838 {
4839 debug_target.to_post_startup_inferior (&debug_target, ptid);
4840
4841 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4842 ptid_get_pid (ptid));
4843 }
4844
4845 static int
4846 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4847 {
4848 int retval;
4849
4850 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4851
4852 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4853 pid, retval);
4854
4855 return retval;
4856 }
4857
4858 static int
4859 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4860 {
4861 int retval;
4862
4863 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4864
4865 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4866 pid, retval);
4867
4868 return retval;
4869 }
4870
4871 static int
4872 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4873 {
4874 int retval;
4875
4876 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4877
4878 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4879 pid, retval);
4880
4881 return retval;
4882 }
4883
4884 static int
4885 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4886 {
4887 int retval;
4888
4889 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4890
4891 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4892 pid, retval);
4893
4894 return retval;
4895 }
4896
4897 static int
4898 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4899 {
4900 int retval;
4901
4902 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4903
4904 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4905 pid, retval);
4906
4907 return retval;
4908 }
4909
4910 static int
4911 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4912 {
4913 int retval;
4914
4915 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4916
4917 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4918 pid, retval);
4919
4920 return retval;
4921 }
4922
4923 static int
4924 debug_to_has_exited (struct target_ops *self,
4925 int pid, int wait_status, int *exit_status)
4926 {
4927 int has_exited;
4928
4929 has_exited = debug_target.to_has_exited (&debug_target,
4930 pid, wait_status, exit_status);
4931
4932 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4933 pid, wait_status, *exit_status, has_exited);
4934
4935 return has_exited;
4936 }
4937
4938 static int
4939 debug_to_can_run (struct target_ops *self)
4940 {
4941 int retval;
4942
4943 retval = debug_target.to_can_run (&debug_target);
4944
4945 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4946
4947 return retval;
4948 }
4949
4950 static struct gdbarch *
4951 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4952 {
4953 struct gdbarch *retval;
4954
4955 retval = debug_target.to_thread_architecture (ops, ptid);
4956
4957 fprintf_unfiltered (gdb_stdlog,
4958 "target_thread_architecture (%s) = %s [%s]\n",
4959 target_pid_to_str (ptid),
4960 host_address_to_string (retval),
4961 gdbarch_bfd_arch_info (retval)->printable_name);
4962 return retval;
4963 }
4964
4965 static void
4966 debug_to_stop (struct target_ops *self, ptid_t ptid)
4967 {
4968 debug_target.to_stop (&debug_target, ptid);
4969
4970 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4971 target_pid_to_str (ptid));
4972 }
4973
4974 static void
4975 debug_to_rcmd (struct target_ops *self, char *command,
4976 struct ui_file *outbuf)
4977 {
4978 debug_target.to_rcmd (&debug_target, command, outbuf);
4979 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4980 }
4981
4982 static char *
4983 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4984 {
4985 char *exec_file;
4986
4987 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4988
4989 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4990 pid, exec_file);
4991
4992 return exec_file;
4993 }
4994
/* Enable target debugging: save a copy of the current (fully
   inherited) target vector in DEBUG_TARGET, then replace selected
   methods of CURRENT_TARGET with the debug_to_* wrappers above, which
   forward to the saved copy and write a trace line to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Preserve the real methods so the wrappers can call them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
5042 \f
5043
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
5048
/* Default implementation of to_rcmd: the target has no remote
   monitor, so reject the command.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
5054
5055 static void
5056 do_monitor_command (char *cmd,
5057 int from_tty)
5058 {
5059 target_rcmd (cmd, gdb_stdtarg);
5060 }
5061
5062 /* Print the name of each layers of our target stack. */
5063
5064 static void
5065 maintenance_print_target_stack (char *cmd, int from_tty)
5066 {
5067 struct target_ops *t;
5068
5069 printf_filtered (_("The current target stack is:\n"));
5070
5071 for (t = target_stack; t != NULL; t = t->beneath)
5072 {
5073 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5074 }
5075 }
5076
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated until the
   inferior stops (see set_target_async_command below).  */
static int target_async_permitted_1 = 0;
5083
/* Set hook for "set target-async".  Copies the user-visible shadow
   value into TARGET_ASYNC_PERMITTED, unless an inferior is live.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Roll the shadow value back before erroring out, so "show
	 target-async" keeps reporting the setting actually in
	 effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
5096
/* Show hook for "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5106
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these shadows; the set hooks below copy them into the live
   may_* flags only when it is safe to do so.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5115
/* Make the user-set values match the real values again (used, e.g.,
   by set_target_permissions to roll the shadows back before raising
   an error).  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5128
5129 /* The one function handles (most of) the permission flags in the same
5130 way. */
5131
static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user-visible shadows to the active values before
	 erroring out, so "show" output stays truthful.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is intentionally not handled here; it is applied
     by set_write_memory_permission, independently of observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5150
5151 /* Set memory write permission independently of observer mode. */
5152
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this hook applies even while the inferior
   is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5161
5162
/* Set up the initial (dummy) target and register all target-related
   commands and settings.  Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the target stack
     forever.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same thing.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags.  Each "may-*" setting writes to a _1 shadow
     variable; the set hook copies it into the live flag (see
     set_target_permissions / set_write_memory_permission above).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.137981 seconds and 4 git commands to generate.