Add target_ops argument to to_close
[binutils-gdb.git] gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 static void *return_null (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
78 ptid_t ptid);
79
80 static int find_default_can_async_p (struct target_ops *ignore);
81
82 static int find_default_is_async_p (struct target_ops *ignore);
83
84 #include "target-delegates.c"
85
86 static void init_dummy_target (void);
87
88 static struct target_ops debug_target;
89
90 static void debug_to_open (char *, int);
91
92 static void debug_to_prepare_to_store (struct target_ops *self,
93 struct regcache *);
94
95 static void debug_to_files_info (struct target_ops *);
96
97 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
98 struct bp_target_info *);
99
100 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_can_use_hw_breakpoint (int, int, int);
104
105 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
106 struct bp_target_info *);
107
108 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
112 struct expression *);
113
114 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
115 struct expression *);
116
117 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
118
119 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
120 CORE_ADDR, CORE_ADDR, int);
121
122 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
123
124 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
125 struct expression *);
126
127 static void debug_to_terminal_init (void);
128
129 static void debug_to_terminal_inferior (void);
130
131 static void debug_to_terminal_ours_for_output (void);
132
133 static void debug_to_terminal_save_ours (void);
134
135 static void debug_to_terminal_ours (void);
136
137 static void debug_to_load (char *, int);
138
139 static int debug_to_can_run (void);
140
141 static void debug_to_stop (ptid_t);
142
 143 /* The array of registered target vectors; the number of entries
 144    currently in use (which is also the next free index); and the
 145    allocated size of the array.  */
146 struct target_ops **target_structs;
147 unsigned target_struct_size;
148 unsigned target_struct_allocsize;
149 #define DEFAULT_ALLOCSIZE 10
150
151 /* The initial current target, so that there is always a semi-valid
152 current target. */
153
154 static struct target_ops dummy_target;
155
156 /* Top of target stack. */
157
158 static struct target_ops *target_stack;
159
160 /* The target structure we are currently using to talk to a process
161 or file or whatever "inferior" we have. */
162
163 struct target_ops current_target;
164
165 /* Command list for target. */
166
167 static struct cmd_list_element *targetlist = NULL;
168
169 /* Nonzero if we should trust readonly sections from the
170 executable when reading memory. */
171
172 static int trust_readonly = 0;
173
174 /* Nonzero if we should show true memory content including
 175    memory breakpoints inserted by GDB.  */
176
177 static int show_memory_breakpoints = 0;
178
179 /* These globals control whether GDB attempts to perform these
180 operations; they are useful for targets that need to prevent
 181    inadvertent disruption, such as in non-stop mode.  */
182
183 int may_write_registers = 1;
184
185 int may_write_memory = 1;
186
187 int may_insert_breakpoints = 1;
188
189 int may_insert_tracepoints = 1;
190
191 int may_insert_fast_tracepoints = 1;
192
193 int may_stop = 1;
194
195 /* Non-zero if we want to see trace of target level stuff. */
196
197 static unsigned int targetdebug = 0;
198 static void
199 show_targetdebug (struct ui_file *file, int from_tty,
200 struct cmd_list_element *c, const char *value)
201 {
202 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
203 }
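
/* For illustration (a sketch of CLI usage, not part of this file):
   target-level tracing is controlled with the "set debug target"
   command, and the function above implements the corresponding show:

     (gdb) set debug target 1
     (gdb) show debug target
     Target debugging is 1.  */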
204
205 static void setup_target_debug (void);
206
207 /* The user just typed 'target' without the name of a target. */
208
209 static void
210 target_command (char *arg, int from_tty)
211 {
212 fputs_filtered ("Argument required (target name). Try `help target'\n",
213 gdb_stdout);
214 }
215
216 /* Default target_has_* methods for process_stratum targets. */
217
218 int
219 default_child_has_all_memory (struct target_ops *ops)
220 {
221 /* If no inferior selected, then we can't read memory here. */
222 if (ptid_equal (inferior_ptid, null_ptid))
223 return 0;
224
225 return 1;
226 }
227
228 int
229 default_child_has_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_stack (struct target_ops *ops)
240 {
241 /* If no inferior selected, there's no stack. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_registers (struct target_ops *ops)
250 {
251 /* Can't read registers from no inferior. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
260 {
261 /* If there's no thread selected, then we can't make it run through
262 hoops. */
263 if (ptid_equal (the_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
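
/* Illustrative sketch (an assumption based on how native targets are
   typically wired up, e.g. in inf-child.c; not part of this file): a
   process_stratum vector T would point at the defaults above with

     t->to_has_all_memory = default_child_has_all_memory;
     t->to_has_memory = default_child_has_memory;
     t->to_has_stack = default_child_has_stack;
     t->to_has_registers = default_child_has_registers;
     t->to_has_execution = default_child_has_execution;

   so each predicate answers "yes" exactly when an inferior (or, for
   execution, a thread) is selected.  */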
268
269
270 int
271 target_has_all_memory_1 (void)
272 {
273 struct target_ops *t;
274
275 for (t = current_target.beneath; t != NULL; t = t->beneath)
276 if (t->to_has_all_memory (t))
277 return 1;
278
279 return 0;
280 }
281
282 int
283 target_has_memory_1 (void)
284 {
285 struct target_ops *t;
286
287 for (t = current_target.beneath; t != NULL; t = t->beneath)
288 if (t->to_has_memory (t))
289 return 1;
290
291 return 0;
292 }
293
294 int
295 target_has_stack_1 (void)
296 {
297 struct target_ops *t;
298
299 for (t = current_target.beneath; t != NULL; t = t->beneath)
300 if (t->to_has_stack (t))
301 return 1;
302
303 return 0;
304 }
305
306 int
307 target_has_registers_1 (void)
308 {
309 struct target_ops *t;
310
311 for (t = current_target.beneath; t != NULL; t = t->beneath)
312 if (t->to_has_registers (t))
313 return 1;
314
315 return 0;
316 }
317
318 int
319 target_has_execution_1 (ptid_t the_ptid)
320 {
321 struct target_ops *t;
322
323 for (t = current_target.beneath; t != NULL; t = t->beneath)
324 if (t->to_has_execution (t, the_ptid))
325 return 1;
326
327 return 0;
328 }
329
330 int
331 target_has_execution_current (void)
332 {
333 return target_has_execution_1 (inferior_ptid);
334 }
335
336 /* Complete initialization of T. This ensures that various fields in
337 T are set, if needed by the target implementation. */
338
339 void
340 complete_target_initialization (struct target_ops *t)
341 {
342 /* Provide default values for all "must have" methods. */
343 if (t->to_xfer_partial == NULL)
344 t->to_xfer_partial = default_xfer_partial;
345
346 if (t->to_has_all_memory == NULL)
347 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
348
349 if (t->to_has_memory == NULL)
350 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
351
352 if (t->to_has_stack == NULL)
353 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
354
355 if (t->to_has_registers == NULL)
356 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
357
358 if (t->to_has_execution == NULL)
359 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
360
361 install_delegators (t);
362 }
363
364 /* Add possible target architecture T to the list and add a new
365 command 'target T->to_shortname'. Set COMPLETER as the command's
366 completer if not NULL. */
367
368 void
369 add_target_with_completer (struct target_ops *t,
370 completer_ftype *completer)
371 {
372 struct cmd_list_element *c;
373
374 complete_target_initialization (t);
375
376 if (!target_structs)
377 {
378 target_struct_allocsize = DEFAULT_ALLOCSIZE;
379 target_structs = (struct target_ops **) xmalloc
380 (target_struct_allocsize * sizeof (*target_structs));
381 }
382 if (target_struct_size >= target_struct_allocsize)
383 {
384 target_struct_allocsize *= 2;
385 target_structs = (struct target_ops **)
386 xrealloc ((char *) target_structs,
387 target_struct_allocsize * sizeof (*target_structs));
388 }
389 target_structs[target_struct_size++] = t;
390
391 if (targetlist == NULL)
392 add_prefix_cmd ("target", class_run, target_command, _("\
393 Connect to a target machine or process.\n\
394 The first argument is the type or protocol of the target machine.\n\
395 Remaining arguments are interpreted by the target protocol. For more\n\
396 information on the arguments for a particular protocol, type\n\
397 `help target ' followed by the protocol name."),
398 &targetlist, "target ", 0, &cmdlist);
399 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
400 &targetlist);
401 if (completer != NULL)
402 set_cmd_completer (c, completer);
403 }
404
405 /* Add a possible target architecture to the list. */
406
407 void
408 add_target (struct target_ops *t)
409 {
410 add_target_with_completer (t, NULL);
411 }
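
/* A minimal sketch of the registration pattern (a hypothetical "foo"
   backend; every name below is an assumption made for illustration and
   is not part of this file).  A backend fills in a target_ops in its
   own file, wires up its open and close routines, and registers the
   vector from its _initialize routine.  */

static struct target_ops foo_ops;

static void
foo_open (char *name, int from_tty)
{
  /* Connect to whatever NAME describes, then install the vector.  */
  push_target (&foo_ops);
}

static void
foo_close (struct target_ops *self)
{
  /* Tear the connection down.  */
}

static void
init_foo_ops (void)
{
  foo_ops.to_shortname = "foo";
  foo_ops.to_longname = "Hypothetical foo target";
  foo_ops.to_doc = "Debug a program over the hypothetical foo protocol.";
  foo_ops.to_open = foo_open;
  foo_ops.to_close = foo_close;
  foo_ops.to_stratum = process_stratum;
  foo_ops.to_magic = OPS_MAGIC;
}

void
_initialize_foo (void)
{
  init_foo_ops ();
  add_target (&foo_ops);
}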
412
413 /* See target.h. */
414
415 void
416 add_deprecated_target_alias (struct target_ops *t, char *alias)
417 {
418 struct cmd_list_element *c;
419 char *alt;
420
 421   /* If we use add_alias_cmd here, we do not get the deprecated warning,
422 see PR cli/15104. */
423 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
424 alt = xstrprintf ("target %s", t->to_shortname);
425 deprecate_cmd (c, alt);
426 }
427
428 /* Stub functions */
429
430 void
431 target_ignore (void)
432 {
433 }
434
435 void
436 target_kill (void)
437 {
438 struct target_ops *t;
439
440 for (t = current_target.beneath; t != NULL; t = t->beneath)
441 if (t->to_kill != NULL)
442 {
443 if (targetdebug)
444 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
445
446 t->to_kill (t);
447 return;
448 }
449
450 noprocess ();
451 }
452
453 void
454 target_load (char *arg, int from_tty)
455 {
456 target_dcache_invalidate ();
457 (*current_target.to_load) (arg, from_tty);
458 }
459
460 void
461 target_create_inferior (char *exec_file, char *args,
462 char **env, int from_tty)
463 {
464 struct target_ops *t;
465
466 for (t = current_target.beneath; t != NULL; t = t->beneath)
467 {
468 if (t->to_create_inferior != NULL)
469 {
470 t->to_create_inferior (t, exec_file, args, env, from_tty);
471 if (targetdebug)
472 fprintf_unfiltered (gdb_stdlog,
473 "target_create_inferior (%s, %s, xxx, %d)\n",
474 exec_file, args, from_tty);
475 return;
476 }
477 }
478
479 internal_error (__FILE__, __LINE__,
480 _("could not find a target to create inferior"));
481 }
482
483 void
484 target_terminal_inferior (void)
485 {
486 /* A background resume (``run&'') should leave GDB in control of the
487 terminal. Use target_can_async_p, not target_is_async_p, since at
488 this point the target is not async yet. However, if sync_execution
489 is not set, we know it will become async prior to resume. */
490 if (target_can_async_p () && !sync_execution)
491 return;
492
493 /* If GDB is resuming the inferior in the foreground, install
494 inferior's terminal modes. */
495 (*current_target.to_terminal_inferior) ();
496 }
497
498 static int
499 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
500 struct target_ops *t)
501 {
502 errno = EIO; /* Can't read/write this location. */
503 return 0; /* No bytes handled. */
504 }
505
506 static void
507 tcomplain (void)
508 {
509 error (_("You can't do that when your target is `%s'"),
510 current_target.to_shortname);
511 }
512
513 void
514 noprocess (void)
515 {
516 error (_("You can't do that without a process to debug."));
517 }
518
519 static void
520 default_terminal_info (const char *args, int from_tty)
521 {
522 printf_unfiltered (_("No saved terminal information.\n"));
523 }
524
525 /* A default implementation for the to_get_ada_task_ptid target method.
526
527 This function builds the PTID by using both LWP and TID as part of
528 the PTID lwp and tid elements. The pid used is the pid of the
529 inferior_ptid. */
530
531 static ptid_t
532 default_get_ada_task_ptid (long lwp, long tid)
533 {
534 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
535 }
536
537 static enum exec_direction_kind
538 default_execution_direction (void)
539 {
540 if (!target_can_execute_reverse)
541 return EXEC_FORWARD;
542 else if (!target_can_async_p ())
543 return EXEC_FORWARD;
544 else
545 gdb_assert_not_reached ("\
546 to_execution_direction must be implemented for reverse async");
547 }
548
549 /* Go through the target stack from top to bottom, copying over zero
550 entries in current_target, then filling in still empty entries. In
551 effect, we are doing class inheritance through the pushed target
552 vectors.
553
554 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
555 is currently implemented, is that it discards any knowledge of
556 which target an inherited method originally belonged to.
 557    Consequently, new target methods should instead explicitly and
558 locally search the target stack for the target that can handle the
559 request. */
560
561 static void
562 update_current_target (void)
563 {
564 struct target_ops *t;
565
566 /* First, reset current's contents. */
567 memset (&current_target, 0, sizeof (current_target));
568
569 /* Install the delegators. */
570 install_delegators (&current_target);
571
572 #define INHERIT(FIELD, TARGET) \
573 if (!current_target.FIELD) \
574 current_target.FIELD = (TARGET)->FIELD
575
576 for (t = target_stack; t; t = t->beneath)
577 {
578 INHERIT (to_shortname, t);
579 INHERIT (to_longname, t);
580 INHERIT (to_doc, t);
581 /* Do not inherit to_open. */
582 /* Do not inherit to_close. */
583 /* Do not inherit to_attach. */
584 INHERIT (to_post_attach, t);
585 INHERIT (to_attach_no_wait, t);
586 /* Do not inherit to_detach. */
587 /* Do not inherit to_disconnect. */
588 /* Do not inherit to_resume. */
589 /* Do not inherit to_wait. */
590 /* Do not inherit to_fetch_registers. */
591 /* Do not inherit to_store_registers. */
592 INHERIT (to_prepare_to_store, t);
593 INHERIT (deprecated_xfer_memory, t);
594 INHERIT (to_files_info, t);
595 /* Do not inherit to_insert_breakpoint. */
596 /* Do not inherit to_remove_breakpoint. */
597 INHERIT (to_can_use_hw_breakpoint, t);
598 INHERIT (to_insert_hw_breakpoint, t);
599 INHERIT (to_remove_hw_breakpoint, t);
600 /* Do not inherit to_ranged_break_num_registers. */
601 INHERIT (to_insert_watchpoint, t);
602 INHERIT (to_remove_watchpoint, t);
603 /* Do not inherit to_insert_mask_watchpoint. */
604 /* Do not inherit to_remove_mask_watchpoint. */
605 /* Do not inherit to_stopped_data_address. */
606 INHERIT (to_have_steppable_watchpoint, t);
607 INHERIT (to_have_continuable_watchpoint, t);
608 /* Do not inherit to_stopped_by_watchpoint. */
609 INHERIT (to_watchpoint_addr_within_range, t);
610 INHERIT (to_region_ok_for_hw_watchpoint, t);
611 INHERIT (to_can_accel_watchpoint_condition, t);
612 /* Do not inherit to_masked_watch_num_registers. */
613 INHERIT (to_terminal_init, t);
614 INHERIT (to_terminal_inferior, t);
615 INHERIT (to_terminal_ours_for_output, t);
616 INHERIT (to_terminal_ours, t);
617 INHERIT (to_terminal_save_ours, t);
618 INHERIT (to_terminal_info, t);
619 /* Do not inherit to_kill. */
620 INHERIT (to_load, t);
 621       /* Do not inherit to_create_inferior.  */
622 INHERIT (to_post_startup_inferior, t);
623 INHERIT (to_insert_fork_catchpoint, t);
624 INHERIT (to_remove_fork_catchpoint, t);
625 INHERIT (to_insert_vfork_catchpoint, t);
626 INHERIT (to_remove_vfork_catchpoint, t);
627 /* Do not inherit to_follow_fork. */
628 INHERIT (to_insert_exec_catchpoint, t);
629 INHERIT (to_remove_exec_catchpoint, t);
630 INHERIT (to_set_syscall_catchpoint, t);
631 INHERIT (to_has_exited, t);
632 /* Do not inherit to_mourn_inferior. */
633 INHERIT (to_can_run, t);
634 /* Do not inherit to_pass_signals. */
635 /* Do not inherit to_program_signals. */
636 /* Do not inherit to_thread_alive. */
637 /* Do not inherit to_find_new_threads. */
638 /* Do not inherit to_pid_to_str. */
639 INHERIT (to_extra_thread_info, t);
640 INHERIT (to_thread_name, t);
641 INHERIT (to_stop, t);
642 /* Do not inherit to_xfer_partial. */
643 INHERIT (to_rcmd, t);
644 INHERIT (to_pid_to_exec_file, t);
645 INHERIT (to_log_command, t);
646 INHERIT (to_stratum, t);
647 /* Do not inherit to_has_all_memory. */
648 /* Do not inherit to_has_memory. */
649 /* Do not inherit to_has_stack. */
650 /* Do not inherit to_has_registers. */
651 /* Do not inherit to_has_execution. */
652 INHERIT (to_has_thread_control, t);
653 /* Do not inherit to_can_async_p. */
654 /* Do not inherit to_is_async_p. */
655 /* Do not inherit to_async. */
656 INHERIT (to_find_memory_regions, t);
657 INHERIT (to_make_corefile_notes, t);
658 INHERIT (to_get_bookmark, t);
659 INHERIT (to_goto_bookmark, t);
660 /* Do not inherit to_get_thread_local_address. */
661 INHERIT (to_can_execute_reverse, t);
662 INHERIT (to_execution_direction, t);
663 INHERIT (to_thread_architecture, t);
664 /* Do not inherit to_read_description. */
665 INHERIT (to_get_ada_task_ptid, t);
666 /* Do not inherit to_search_memory. */
667 INHERIT (to_supports_multi_process, t);
668 INHERIT (to_supports_enable_disable_tracepoint, t);
669 INHERIT (to_supports_string_tracing, t);
670 INHERIT (to_trace_init, t);
671 INHERIT (to_download_tracepoint, t);
672 INHERIT (to_can_download_tracepoint, t);
673 INHERIT (to_download_trace_state_variable, t);
674 INHERIT (to_enable_tracepoint, t);
675 INHERIT (to_disable_tracepoint, t);
676 INHERIT (to_trace_set_readonly_regions, t);
677 INHERIT (to_trace_start, t);
678 INHERIT (to_get_trace_status, t);
679 INHERIT (to_get_tracepoint_status, t);
680 INHERIT (to_trace_stop, t);
681 INHERIT (to_trace_find, t);
682 INHERIT (to_get_trace_state_variable_value, t);
683 INHERIT (to_save_trace_data, t);
684 INHERIT (to_upload_tracepoints, t);
685 INHERIT (to_upload_trace_state_variables, t);
686 INHERIT (to_get_raw_trace_data, t);
687 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
688 INHERIT (to_set_disconnected_tracing, t);
689 INHERIT (to_set_circular_trace_buffer, t);
690 INHERIT (to_set_trace_buffer_size, t);
691 INHERIT (to_set_trace_notes, t);
692 INHERIT (to_get_tib_address, t);
693 INHERIT (to_set_permissions, t);
694 INHERIT (to_static_tracepoint_marker_at, t);
695 INHERIT (to_static_tracepoint_markers_by_strid, t);
696 INHERIT (to_traceframe_info, t);
697 INHERIT (to_use_agent, t);
698 INHERIT (to_can_use_agent, t);
699 INHERIT (to_augmented_libraries_svr4_read, t);
700 INHERIT (to_magic, t);
701 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
702 INHERIT (to_can_run_breakpoint_commands, t);
703 /* Do not inherit to_memory_map. */
704 /* Do not inherit to_flash_erase. */
705 /* Do not inherit to_flash_done. */
706 }
707 #undef INHERIT
708
709 /* Clean up a target struct so it no longer has any zero pointers in
 710    it.  Some entries are defaulted to a method that prints an error,
711 others are hard-wired to a standard recursive default. */
712
713 #define de_fault(field, value) \
714 if (!current_target.field) \
715 current_target.field = value
716
717 de_fault (to_open,
718 (void (*) (char *, int))
719 tcomplain);
720 de_fault (to_close,
721 (void (*) (struct target_ops *))
722 target_ignore);
723 de_fault (to_post_attach,
724 (void (*) (int))
725 target_ignore);
726 de_fault (to_prepare_to_store,
727 (void (*) (struct target_ops *, struct regcache *))
728 noprocess);
729 de_fault (deprecated_xfer_memory,
730 (int (*) (CORE_ADDR, gdb_byte *, int, int,
731 struct mem_attrib *, struct target_ops *))
732 nomemory);
733 de_fault (to_files_info,
734 (void (*) (struct target_ops *))
735 target_ignore);
736 de_fault (to_can_use_hw_breakpoint,
737 (int (*) (int, int, int))
738 return_zero);
739 de_fault (to_insert_hw_breakpoint,
740 (int (*) (struct gdbarch *, struct bp_target_info *))
741 return_minus_one);
742 de_fault (to_remove_hw_breakpoint,
743 (int (*) (struct gdbarch *, struct bp_target_info *))
744 return_minus_one);
745 de_fault (to_insert_watchpoint,
746 (int (*) (CORE_ADDR, int, int, struct expression *))
747 return_minus_one);
748 de_fault (to_remove_watchpoint,
749 (int (*) (CORE_ADDR, int, int, struct expression *))
750 return_minus_one);
751 de_fault (to_watchpoint_addr_within_range,
752 default_watchpoint_addr_within_range);
753 de_fault (to_region_ok_for_hw_watchpoint,
754 default_region_ok_for_hw_watchpoint);
755 de_fault (to_can_accel_watchpoint_condition,
756 (int (*) (CORE_ADDR, int, int, struct expression *))
757 return_zero);
758 de_fault (to_terminal_init,
759 (void (*) (void))
760 target_ignore);
761 de_fault (to_terminal_inferior,
762 (void (*) (void))
763 target_ignore);
764 de_fault (to_terminal_ours_for_output,
765 (void (*) (void))
766 target_ignore);
767 de_fault (to_terminal_ours,
768 (void (*) (void))
769 target_ignore);
770 de_fault (to_terminal_save_ours,
771 (void (*) (void))
772 target_ignore);
773 de_fault (to_terminal_info,
774 default_terminal_info);
775 de_fault (to_load,
776 (void (*) (char *, int))
777 tcomplain);
778 de_fault (to_post_startup_inferior,
779 (void (*) (ptid_t))
780 target_ignore);
781 de_fault (to_insert_fork_catchpoint,
782 (int (*) (int))
783 return_one);
784 de_fault (to_remove_fork_catchpoint,
785 (int (*) (int))
786 return_one);
787 de_fault (to_insert_vfork_catchpoint,
788 (int (*) (int))
789 return_one);
790 de_fault (to_remove_vfork_catchpoint,
791 (int (*) (int))
792 return_one);
793 de_fault (to_insert_exec_catchpoint,
794 (int (*) (int))
795 return_one);
796 de_fault (to_remove_exec_catchpoint,
797 (int (*) (int))
798 return_one);
799 de_fault (to_set_syscall_catchpoint,
800 (int (*) (int, int, int, int, int *))
801 return_one);
802 de_fault (to_has_exited,
803 (int (*) (int, int, int *))
804 return_zero);
805 de_fault (to_can_run,
806 return_zero);
807 de_fault (to_extra_thread_info,
808 (char *(*) (struct thread_info *))
809 return_null);
810 de_fault (to_thread_name,
811 (char *(*) (struct thread_info *))
812 return_null);
813 de_fault (to_stop,
814 (void (*) (ptid_t))
815 target_ignore);
816 de_fault (to_rcmd,
817 (void (*) (char *, struct ui_file *))
818 tcomplain);
819 de_fault (to_pid_to_exec_file,
820 (char *(*) (int))
821 return_null);
822 de_fault (to_thread_architecture,
823 default_thread_architecture);
824 current_target.to_read_description = NULL;
825 de_fault (to_get_ada_task_ptid,
826 (ptid_t (*) (long, long))
827 default_get_ada_task_ptid);
828 de_fault (to_supports_multi_process,
829 (int (*) (void))
830 return_zero);
831 de_fault (to_supports_enable_disable_tracepoint,
832 (int (*) (void))
833 return_zero);
834 de_fault (to_supports_string_tracing,
835 (int (*) (void))
836 return_zero);
837 de_fault (to_trace_init,
838 (void (*) (void))
839 tcomplain);
840 de_fault (to_download_tracepoint,
841 (void (*) (struct bp_location *))
842 tcomplain);
843 de_fault (to_can_download_tracepoint,
844 (int (*) (void))
845 return_zero);
846 de_fault (to_download_trace_state_variable,
847 (void (*) (struct trace_state_variable *))
848 tcomplain);
849 de_fault (to_enable_tracepoint,
850 (void (*) (struct bp_location *))
851 tcomplain);
852 de_fault (to_disable_tracepoint,
853 (void (*) (struct bp_location *))
854 tcomplain);
855 de_fault (to_trace_set_readonly_regions,
856 (void (*) (void))
857 tcomplain);
858 de_fault (to_trace_start,
859 (void (*) (void))
860 tcomplain);
861 de_fault (to_get_trace_status,
862 (int (*) (struct trace_status *))
863 return_minus_one);
864 de_fault (to_get_tracepoint_status,
865 (void (*) (struct breakpoint *, struct uploaded_tp *))
866 tcomplain);
867 de_fault (to_trace_stop,
868 (void (*) (void))
869 tcomplain);
870 de_fault (to_trace_find,
871 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
872 return_minus_one);
873 de_fault (to_get_trace_state_variable_value,
874 (int (*) (int, LONGEST *))
875 return_zero);
876 de_fault (to_save_trace_data,
877 (int (*) (const char *))
878 tcomplain);
879 de_fault (to_upload_tracepoints,
880 (int (*) (struct uploaded_tp **))
881 return_zero);
882 de_fault (to_upload_trace_state_variables,
883 (int (*) (struct uploaded_tsv **))
884 return_zero);
885 de_fault (to_get_raw_trace_data,
886 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
887 tcomplain);
888 de_fault (to_get_min_fast_tracepoint_insn_len,
889 (int (*) (void))
890 return_minus_one);
891 de_fault (to_set_disconnected_tracing,
892 (void (*) (int))
893 target_ignore);
894 de_fault (to_set_circular_trace_buffer,
895 (void (*) (int))
896 target_ignore);
897 de_fault (to_set_trace_buffer_size,
898 (void (*) (LONGEST))
899 target_ignore);
900 de_fault (to_set_trace_notes,
901 (int (*) (const char *, const char *, const char *))
902 return_zero);
903 de_fault (to_get_tib_address,
904 (int (*) (ptid_t, CORE_ADDR *))
905 tcomplain);
906 de_fault (to_set_permissions,
907 (void (*) (void))
908 target_ignore);
909 de_fault (to_static_tracepoint_marker_at,
910 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
911 return_zero);
912 de_fault (to_static_tracepoint_markers_by_strid,
913 (VEC(static_tracepoint_marker_p) * (*) (const char *))
914 tcomplain);
915 de_fault (to_traceframe_info,
916 (struct traceframe_info * (*) (void))
917 return_null);
918 de_fault (to_supports_evaluation_of_breakpoint_conditions,
919 (int (*) (void))
920 return_zero);
921 de_fault (to_can_run_breakpoint_commands,
922 (int (*) (void))
923 return_zero);
924 de_fault (to_use_agent,
925 (int (*) (int))
926 tcomplain);
927 de_fault (to_can_use_agent,
928 (int (*) (void))
929 return_zero);
930 de_fault (to_augmented_libraries_svr4_read,
931 (int (*) (void))
932 return_zero);
933 de_fault (to_execution_direction, default_execution_direction);
934
935 #undef de_fault
936
937 /* Finally, position the target-stack beneath the squashed
938 "current_target". That way code looking for a non-inherited
939 target method can quickly and simply find it. */
940 current_target.beneath = target_stack;
941
942 if (targetdebug)
943 setup_target_debug ();
944 }
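
/* For illustration, in the function above INHERIT (to_files_info, t)
   expands to

     if (!current_target.to_files_info)
       current_target.to_files_info = (t)->to_files_info;

   and de_fault (to_files_info, some_default) expands to

     if (!current_target.to_files_info)
       current_target.to_files_info = some_default;

   so a method is taken from the topmost pushed target that provides
   it, and failing that from the listed default.  */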
945
946 /* Push a new target type into the stack of the existing target accessors,
947 possibly superseding some of the existing accessors.
948
949 Rather than allow an empty stack, we always have the dummy target at
950 the bottom stratum, so we can call the function vectors without
951 checking them. */
952
953 void
954 push_target (struct target_ops *t)
955 {
956 struct target_ops **cur;
957
958 /* Check magic number. If wrong, it probably means someone changed
959 the struct definition, but not all the places that initialize one. */
960 if (t->to_magic != OPS_MAGIC)
961 {
962 fprintf_unfiltered (gdb_stderr,
963 "Magic number of %s target struct wrong\n",
964 t->to_shortname);
965 internal_error (__FILE__, __LINE__,
966 _("failed internal consistency check"));
967 }
968
969 /* Find the proper stratum to install this target in. */
970 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
971 {
972 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
973 break;
974 }
975
 976   /* If there are already targets at this stratum, remove them.  */
977 /* FIXME: cagney/2003-10-15: I think this should be popping all
978 targets to CUR, and not just those at this stratum level. */
979 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
980 {
981 /* There's already something at this stratum level. Close it,
982 and un-hook it from the stack. */
983 struct target_ops *tmp = (*cur);
984
985 (*cur) = (*cur)->beneath;
986 tmp->beneath = NULL;
987 target_close (tmp);
988 }
989
 990   /* We have removed all targets in our stratum; now add the new one.  */
991 t->beneath = (*cur);
992 (*cur) = t;
993
994 update_current_target ();
995 }
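
/* For illustration (a sketch; the strata come from target.h): with an
   executable loaded and a live process the stack typically ends up as

     process_stratum   (e.g. a native or remote target)
     file_stratum      (the exec target)
     dummy_stratum     (the dummy target, always at the bottom)

   and a newly pushed vector replaces any existing target at its own
   stratum while remaining above every lower stratum.  */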
996
997 /* Remove a target_ops vector from the stack, wherever it may be.
998 Return how many times it was removed (0 or 1). */
999
1000 int
1001 unpush_target (struct target_ops *t)
1002 {
1003 struct target_ops **cur;
1004 struct target_ops *tmp;
1005
1006 if (t->to_stratum == dummy_stratum)
1007 internal_error (__FILE__, __LINE__,
1008 _("Attempt to unpush the dummy target"));
1009
1010 /* Look for the specified target. Note that we assume that a target
1011 can only occur once in the target stack. */
1012
1013 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1014 {
1015 if ((*cur) == t)
1016 break;
1017 }
1018
1019 /* If we don't find target_ops, quit. Only open targets should be
1020 closed. */
1021 if ((*cur) == NULL)
1022 return 0;
1023
1024 /* Unchain the target. */
1025 tmp = (*cur);
1026 (*cur) = (*cur)->beneath;
1027 tmp->beneath = NULL;
1028
1029 update_current_target ();
1030
1031 /* Finally close the target. Note we do this after unchaining, so
1032 any target method calls from within the target_close
1033 implementation don't end up in T anymore. */
1034 target_close (t);
1035
1036 return 1;
1037 }
1038
1039 void
1040 pop_all_targets_above (enum strata above_stratum)
1041 {
1042 while ((int) (current_target.to_stratum) > (int) above_stratum)
1043 {
1044 if (!unpush_target (target_stack))
1045 {
1046 fprintf_unfiltered (gdb_stderr,
1047 "pop_all_targets couldn't find target %s\n",
1048 target_stack->to_shortname);
1049 internal_error (__FILE__, __LINE__,
1050 _("failed internal consistency check"));
1051 break;
1052 }
1053 }
1054 }
1055
1056 void
1057 pop_all_targets (void)
1058 {
1059 pop_all_targets_above (dummy_stratum);
1060 }
1061
1062 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1063
1064 int
1065 target_is_pushed (struct target_ops *t)
1066 {
1067 struct target_ops **cur;
1068
1069 /* Check magic number. If wrong, it probably means someone changed
1070 the struct definition, but not all the places that initialize one. */
1071 if (t->to_magic != OPS_MAGIC)
1072 {
1073 fprintf_unfiltered (gdb_stderr,
1074 "Magic number of %s target struct wrong\n",
1075 t->to_shortname);
1076 internal_error (__FILE__, __LINE__,
1077 _("failed internal consistency check"));
1078 }
1079
1080 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1081 if (*cur == t)
1082 return 1;
1083
1084 return 0;
1085 }
1086
1087 /* Using the objfile specified in OBJFILE, find the address for the
1088 current thread's thread-local storage with offset OFFSET. */
1089 CORE_ADDR
1090 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1091 {
1092 volatile CORE_ADDR addr = 0;
1093 struct target_ops *target;
1094
1095 for (target = current_target.beneath;
1096 target != NULL;
1097 target = target->beneath)
1098 {
1099 if (target->to_get_thread_local_address != NULL)
1100 break;
1101 }
1102
1103 if (target != NULL
1104 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1105 {
1106 ptid_t ptid = inferior_ptid;
1107 volatile struct gdb_exception ex;
1108
1109 TRY_CATCH (ex, RETURN_MASK_ALL)
1110 {
1111 CORE_ADDR lm_addr;
1112
1113 /* Fetch the load module address for this objfile. */
1114 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1115 objfile);
1116 /* If it's 0, throw the appropriate exception. */
1117 if (lm_addr == 0)
1118 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1119 _("TLS load module not found"));
1120
1121 addr = target->to_get_thread_local_address (target, ptid,
1122 lm_addr, offset);
1123 }
1124 /* If an error occurred, print TLS related messages here. Otherwise,
1125 throw the error to some higher catcher. */
1126 if (ex.reason < 0)
1127 {
1128 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1129
1130 switch (ex.error)
1131 {
1132 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1133 error (_("Cannot find thread-local variables "
1134 "in this thread library."));
1135 break;
1136 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1137 if (objfile_is_library)
1138 error (_("Cannot find shared library `%s' in dynamic"
1139 " linker's load module list"), objfile_name (objfile));
1140 else
1141 error (_("Cannot find executable file `%s' in dynamic"
1142 " linker's load module list"), objfile_name (objfile));
1143 break;
1144 case TLS_NOT_ALLOCATED_YET_ERROR:
1145 if (objfile_is_library)
1146 error (_("The inferior has not yet allocated storage for"
1147 " thread-local variables in\n"
1148 "the shared library `%s'\n"
1149 "for %s"),
1150 objfile_name (objfile), target_pid_to_str (ptid));
1151 else
1152 error (_("The inferior has not yet allocated storage for"
1153 " thread-local variables in\n"
1154 "the executable `%s'\n"
1155 "for %s"),
1156 objfile_name (objfile), target_pid_to_str (ptid));
1157 break;
1158 case TLS_GENERIC_ERROR:
1159 if (objfile_is_library)
1160 error (_("Cannot find thread-local storage for %s, "
1161 "shared library %s:\n%s"),
1162 target_pid_to_str (ptid),
1163 objfile_name (objfile), ex.message);
1164 else
1165 error (_("Cannot find thread-local storage for %s, "
1166 "executable file %s:\n%s"),
1167 target_pid_to_str (ptid),
1168 objfile_name (objfile), ex.message);
1169 break;
1170 default:
1171 throw_exception (ex);
1172 break;
1173 }
1174 }
1175 }
1176 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1177 TLS is an ABI-specific thing. But we don't do that yet. */
1178 else
1179 error (_("Cannot find thread-local variables on this target"));
1180
1181 return addr;
1182 }
1183
1184 const char *
1185 target_xfer_status_to_string (enum target_xfer_status err)
1186 {
1187 #define CASE(X) case X: return #X
1188 switch (err)
1189 {
1190 CASE(TARGET_XFER_E_IO);
1191 CASE(TARGET_XFER_E_UNAVAILABLE);
1192 default:
1193 return "<unknown>";
1194 }
1195 #undef CASE
1196 };
1197
1198
1199 #undef MIN
1200 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1201
1202 /* target_read_string -- read a null terminated string, up to LEN bytes,
1203 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1204 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1205 is responsible for freeing it. Return the number of bytes successfully
1206 read. */
1207
1208 int
1209 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1210 {
1211 int tlen, offset, i;
1212 gdb_byte buf[4];
1213 int errcode = 0;
1214 char *buffer;
1215 int buffer_allocated;
1216 char *bufptr;
1217 unsigned int nbytes_read = 0;
1218
1219 gdb_assert (string);
1220
1221 /* Small for testing. */
1222 buffer_allocated = 4;
1223 buffer = xmalloc (buffer_allocated);
1224 bufptr = buffer;
1225
1226 while (len > 0)
1227 {
1228 tlen = MIN (len, 4 - (memaddr & 3));
1229 offset = memaddr & 3;
1230
1231 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1232 if (errcode != 0)
1233 {
1234 /* The transfer request might have crossed the boundary to an
1235 unallocated region of memory. Retry the transfer, requesting
1236 a single byte. */
1237 tlen = 1;
1238 offset = 0;
1239 errcode = target_read_memory (memaddr, buf, 1);
1240 if (errcode != 0)
1241 goto done;
1242 }
1243
1244 if (bufptr - buffer + tlen > buffer_allocated)
1245 {
1246 unsigned int bytes;
1247
1248 bytes = bufptr - buffer;
1249 buffer_allocated *= 2;
1250 buffer = xrealloc (buffer, buffer_allocated);
1251 bufptr = buffer + bytes;
1252 }
1253
1254 for (i = 0; i < tlen; i++)
1255 {
1256 *bufptr++ = buf[i + offset];
1257 if (buf[i + offset] == '\000')
1258 {
1259 nbytes_read += i + 1;
1260 goto done;
1261 }
1262 }
1263
1264 memaddr += tlen;
1265 len -= tlen;
1266 nbytes_read += tlen;
1267 }
1268 done:
1269 *string = buffer;
1270 if (errnop != NULL)
1271 *errnop = errcode;
1272 return nbytes_read;
1273 }
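
/* Sketch of a typical caller (the helper and ADDR are hypothetical):
   read a NUL-terminated string of at most 200 bytes and release the
   buffer when done.  */

static void
example_print_target_string (CORE_ADDR addr)
{
  char *str;
  int errcode;
  int nread = target_read_string (addr, &str, 200, &errcode);

  if (errcode == 0 && nread > 0)
    printf_filtered ("%s\n", str);

  /* The buffer is allocated even on error, so always free it.  */
  xfree (str);
}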
1274
1275 struct target_section_table *
1276 target_get_section_table (struct target_ops *target)
1277 {
1278 struct target_ops *t;
1279
1280 if (targetdebug)
1281 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1282
1283 for (t = target; t != NULL; t = t->beneath)
1284 if (t->to_get_section_table != NULL)
1285 return (*t->to_get_section_table) (t);
1286
1287 return NULL;
1288 }
1289
1290 /* Find a section containing ADDR. */
1291
1292 struct target_section *
1293 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1294 {
1295 struct target_section_table *table = target_get_section_table (target);
1296 struct target_section *secp;
1297
1298 if (table == NULL)
1299 return NULL;
1300
1301 for (secp = table->sections; secp < table->sections_end; secp++)
1302 {
1303 if (addr >= secp->addr && addr < secp->endaddr)
1304 return secp;
1305 }
1306 return NULL;
1307 }
1308
1309 /* Read memory from the live target, even if currently inspecting a
1310 traceframe. The return is the same as that of target_read. */
1311
1312 static enum target_xfer_status
1313 target_read_live_memory (enum target_object object,
1314 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1315 ULONGEST *xfered_len)
1316 {
1317 enum target_xfer_status ret;
1318 struct cleanup *cleanup;
1319
 1320   /* Switch momentarily out of tfind mode so as to access live memory.
1321 Note that this must not clear global state, such as the frame
1322 cache, which must still remain valid for the previous traceframe.
1323 We may be _building_ the frame cache at this point. */
1324 cleanup = make_cleanup_restore_traceframe_number ();
1325 set_traceframe_number (-1);
1326
1327 ret = target_xfer_partial (current_target.beneath, object, NULL,
1328 myaddr, NULL, memaddr, len, xfered_len);
1329
1330 do_cleanups (cleanup);
1331 return ret;
1332 }
1333
1334 /* Using the set of read-only target sections of OPS, read live
1335 read-only memory. Note that the actual reads start from the
1336 top-most target again.
1337
1338 For interface/parameters/return description see target.h,
1339 to_xfer_partial. */
1340
1341 static enum target_xfer_status
1342 memory_xfer_live_readonly_partial (struct target_ops *ops,
1343 enum target_object object,
1344 gdb_byte *readbuf, ULONGEST memaddr,
1345 ULONGEST len, ULONGEST *xfered_len)
1346 {
1347 struct target_section *secp;
1348 struct target_section_table *table;
1349
1350 secp = target_section_by_addr (ops, memaddr);
1351 if (secp != NULL
1352 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1353 secp->the_bfd_section)
1354 & SEC_READONLY))
1355 {
1356 struct target_section *p;
1357 ULONGEST memend = memaddr + len;
1358
1359 table = target_get_section_table (ops);
1360
1361 for (p = table->sections; p < table->sections_end; p++)
1362 {
1363 if (memaddr >= p->addr)
1364 {
1365 if (memend <= p->endaddr)
1366 {
1367 /* Entire transfer is within this section. */
1368 return target_read_live_memory (object, memaddr,
1369 readbuf, len, xfered_len);
1370 }
1371 else if (memaddr >= p->endaddr)
1372 {
1373 /* This section ends before the transfer starts. */
1374 continue;
1375 }
1376 else
1377 {
1378 /* This section overlaps the transfer. Just do half. */
1379 len = p->endaddr - memaddr;
1380 return target_read_live_memory (object, memaddr,
1381 readbuf, len, xfered_len);
1382 }
1383 }
1384 }
1385 }
1386
1387 return TARGET_XFER_EOF;
1388 }
1389
1390 /* Read memory from more than one valid target. A core file, for
 1391    instance, could have some of the memory but delegate other bits to
1392 the target below it. So, we must manually try all targets. */
1393
1394 static enum target_xfer_status
1395 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1396 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1397 ULONGEST *xfered_len)
1398 {
1399 enum target_xfer_status res;
1400
1401 do
1402 {
1403 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1404 readbuf, writebuf, memaddr, len,
1405 xfered_len);
1406 if (res == TARGET_XFER_OK)
1407 break;
1408
1409 /* Stop if the target reports that the memory is not available. */
1410 if (res == TARGET_XFER_E_UNAVAILABLE)
1411 break;
1412
1413 /* We want to continue past core files to executables, but not
1414 past a running target's memory. */
1415 if (ops->to_has_all_memory (ops))
1416 break;
1417
1418 ops = ops->beneath;
1419 }
1420 while (ops != NULL);
1421
1422 return res;
1423 }
1424
1425 /* Perform a partial memory transfer.
1426 For docs see target.h, to_xfer_partial. */
1427
1428 static enum target_xfer_status
1429 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1430 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1431 ULONGEST len, ULONGEST *xfered_len)
1432 {
1433 enum target_xfer_status res;
1434 int reg_len;
1435 struct mem_region *region;
1436 struct inferior *inf;
1437
1438 /* For accesses to unmapped overlay sections, read directly from
1439 files. Must do this first, as MEMADDR may need adjustment. */
1440 if (readbuf != NULL && overlay_debugging)
1441 {
1442 struct obj_section *section = find_pc_overlay (memaddr);
1443
1444 if (pc_in_unmapped_range (memaddr, section))
1445 {
1446 struct target_section_table *table
1447 = target_get_section_table (ops);
1448 const char *section_name = section->the_bfd_section->name;
1449
1450 memaddr = overlay_mapped_address (memaddr, section);
1451 return section_table_xfer_memory_partial (readbuf, writebuf,
1452 memaddr, len, xfered_len,
1453 table->sections,
1454 table->sections_end,
1455 section_name);
1456 }
1457 }
1458
1459 /* Try the executable files, if "trust-readonly-sections" is set. */
1460 if (readbuf != NULL && trust_readonly)
1461 {
1462 struct target_section *secp;
1463 struct target_section_table *table;
1464
1465 secp = target_section_by_addr (ops, memaddr);
1466 if (secp != NULL
1467 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1468 secp->the_bfd_section)
1469 & SEC_READONLY))
1470 {
1471 table = target_get_section_table (ops);
1472 return section_table_xfer_memory_partial (readbuf, writebuf,
1473 memaddr, len, xfered_len,
1474 table->sections,
1475 table->sections_end,
1476 NULL);
1477 }
1478 }
1479
1480 /* If reading unavailable memory in the context of traceframes, and
 1481      this address falls within a read-only section, fall back to
1482 reading from live memory. */
1483 if (readbuf != NULL && get_traceframe_number () != -1)
1484 {
1485 VEC(mem_range_s) *available;
1486
1487 /* If we fail to get the set of available memory, then the
1488 target does not support querying traceframe info, and so we
1489 attempt reading from the traceframe anyway (assuming the
1490 target implements the old QTro packet then). */
1491 if (traceframe_available_memory (&available, memaddr, len))
1492 {
1493 struct cleanup *old_chain;
1494
1495 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1496
1497 if (VEC_empty (mem_range_s, available)
1498 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1499 {
1500 /* Don't read into the traceframe's available
1501 memory. */
1502 if (!VEC_empty (mem_range_s, available))
1503 {
1504 LONGEST oldlen = len;
1505
1506 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1507 gdb_assert (len <= oldlen);
1508 }
1509
1510 do_cleanups (old_chain);
1511
1512 /* This goes through the topmost target again. */
1513 res = memory_xfer_live_readonly_partial (ops, object,
1514 readbuf, memaddr,
1515 len, xfered_len);
1516 if (res == TARGET_XFER_OK)
1517 return TARGET_XFER_OK;
1518 else
1519 {
1520 /* No use trying further, we know some memory starting
1521 at MEMADDR isn't available. */
1522 *xfered_len = len;
1523 return TARGET_XFER_E_UNAVAILABLE;
1524 }
1525 }
1526
 1527 	  /* Don't try to read more than is available, in
1528 case the target implements the deprecated QTro packet to
1529 cater for older GDBs (the target's knowledge of read-only
1530 sections may be outdated by now). */
1531 len = VEC_index (mem_range_s, available, 0)->length;
1532
1533 do_cleanups (old_chain);
1534 }
1535 }
1536
1537 /* Try GDB's internal data cache. */
1538 region = lookup_mem_region (memaddr);
1539 /* region->hi == 0 means there's no upper bound. */
1540 if (memaddr + len < region->hi || region->hi == 0)
1541 reg_len = len;
1542 else
1543 reg_len = region->hi - memaddr;
1544
1545 switch (region->attrib.mode)
1546 {
1547 case MEM_RO:
1548 if (writebuf != NULL)
1549 return TARGET_XFER_E_IO;
1550 break;
1551
1552 case MEM_WO:
1553 if (readbuf != NULL)
1554 return TARGET_XFER_E_IO;
1555 break;
1556
1557 case MEM_FLASH:
1558 /* We only support writing to flash during "load" for now. */
1559 if (writebuf != NULL)
1560 error (_("Writing to flash memory forbidden in this context"));
1561 break;
1562
1563 case MEM_NONE:
1564 return TARGET_XFER_E_IO;
1565 }
1566
1567 if (!ptid_equal (inferior_ptid, null_ptid))
1568 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1569 else
1570 inf = NULL;
1571
1572 if (inf != NULL
1573 /* The dcache reads whole cache lines; that doesn't play well
1574 with reading from a trace buffer, because reading outside of
1575 the collected memory range fails. */
1576 && get_traceframe_number () == -1
1577 && (region->attrib.cache
1578 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1579 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1580 {
1581 DCACHE *dcache = target_dcache_get_or_init ();
1582 int l;
1583
1584 if (readbuf != NULL)
1585 l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1586 else
1587 /* FIXME drow/2006-08-09: If we're going to preserve const
1588 correctness dcache_xfer_memory should take readbuf and
1589 writebuf. */
1590 l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1591 reg_len, 1);
1592 if (l <= 0)
1593 return TARGET_XFER_E_IO;
1594 else
1595 {
1596 *xfered_len = (ULONGEST) l;
1597 return TARGET_XFER_OK;
1598 }
1599 }
1600
1601 /* If none of those methods found the memory we wanted, fall back
1602 to a target partial transfer. Normally a single call to
1603 to_xfer_partial is enough; if it doesn't recognize an object
1604 it will call the to_xfer_partial of the next target down.
1605 But for memory this won't do. Memory is the only target
1606 object which can be read from more than one valid target.
 1607      A core file, for instance, could have some of the memory but
1608 delegate other bits to the target below it. So, we must
1609 manually try all targets. */
1610
1611 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1612 xfered_len);
1613
 1614   /* Make sure the cache gets updated no matter what, in case we are
 1615      writing to the stack: even if this write is not tagged as a stack
 1616      write, we still need to update the cache.  */
1617
1618 if (res == TARGET_XFER_OK
1619 && inf != NULL
1620 && writebuf != NULL
1621 && target_dcache_init_p ()
1622 && !region->attrib.cache
1623 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1624 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1625 {
1626 DCACHE *dcache = target_dcache_get ();
1627
1628 dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
1629 }
1630
1631 /* If we still haven't got anything, return the last error. We
1632 give up. */
1633 return res;
1634 }
1635
1636 /* Perform a partial memory transfer. For docs see target.h,
1637 to_xfer_partial. */
1638
1639 static enum target_xfer_status
1640 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1641 gdb_byte *readbuf, const gdb_byte *writebuf,
1642 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1643 {
1644 enum target_xfer_status res;
1645
1646 /* Zero length requests are ok and require no work. */
1647 if (len == 0)
1648 return TARGET_XFER_EOF;
1649
1650 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1651 breakpoint insns, thus hiding out from higher layers whether
1652 there are software breakpoints inserted in the code stream. */
1653 if (readbuf != NULL)
1654 {
1655 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1656 xfered_len);
1657
1658 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
 1659 	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1660 }
1661 else
1662 {
1663 void *buf;
1664 struct cleanup *old_chain;
1665
1666 /* A large write request is likely to be partially satisfied
1667 by memory_xfer_partial_1. We will continually malloc
1668 and free a copy of the entire write request for breakpoint
1669 shadow handling even though we only end up writing a small
1670 subset of it. Cap writes to 4KB to mitigate this. */
1671 len = min (4096, len);
1672
1673 buf = xmalloc (len);
1674 old_chain = make_cleanup (xfree, buf);
1675 memcpy (buf, writebuf, len);
1676
1677 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1678 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1679 xfered_len);
1680
1681 do_cleanups (old_chain);
1682 }
1683
1684 return res;
1685 }
1686
1687 static void
1688 restore_show_memory_breakpoints (void *arg)
1689 {
1690 show_memory_breakpoints = (uintptr_t) arg;
1691 }
1692
1693 struct cleanup *
1694 make_show_memory_breakpoints_cleanup (int show)
1695 {
1696 int current = show_memory_breakpoints;
1697
1698 show_memory_breakpoints = show;
1699 return make_cleanup (restore_show_memory_breakpoints,
1700 (void *) (uintptr_t) current);
1701 }
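
/* Sketch of the intended usage (hypothetical helper): temporarily ask
   for the raw memory contents, with any breakpoint instructions GDB
   inserted left visible, and restore the previous setting through the
   cleanup.  */

static int
example_read_raw_insn (CORE_ADDR pc, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int status = target_read_memory (pc, buf, len);

  do_cleanups (old_chain);
  return status;
}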
1702
1703 /* For docs see target.h, to_xfer_partial. */
1704
1705 enum target_xfer_status
1706 target_xfer_partial (struct target_ops *ops,
1707 enum target_object object, const char *annex,
1708 gdb_byte *readbuf, const gdb_byte *writebuf,
1709 ULONGEST offset, ULONGEST len,
1710 ULONGEST *xfered_len)
1711 {
1712 enum target_xfer_status retval;
1713
1714 gdb_assert (ops->to_xfer_partial != NULL);
1715
1716 /* Transfer is done when LEN is zero. */
1717 if (len == 0)
1718 return TARGET_XFER_EOF;
1719
1720 if (writebuf && !may_write_memory)
1721 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1722 core_addr_to_string_nz (offset), plongest (len));
1723
1724 *xfered_len = 0;
1725
1726 /* If this is a memory transfer, let the memory-specific code
1727 have a look at it instead. Memory transfers are more
1728 complicated. */
1729 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1730 || object == TARGET_OBJECT_CODE_MEMORY)
1731 retval = memory_xfer_partial (ops, object, readbuf,
1732 writebuf, offset, len, xfered_len);
1733 else if (object == TARGET_OBJECT_RAW_MEMORY)
1734 {
1735 /* Request the normal memory object from other layers. */
1736 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1737 xfered_len);
1738 }
1739 else
1740 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1741 writebuf, offset, len, xfered_len);
1742
1743 if (targetdebug)
1744 {
1745 const unsigned char *myaddr = NULL;
1746
1747 fprintf_unfiltered (gdb_stdlog,
1748 "%s:target_xfer_partial "
1749 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1750 ops->to_shortname,
1751 (int) object,
1752 (annex ? annex : "(null)"),
1753 host_address_to_string (readbuf),
1754 host_address_to_string (writebuf),
1755 core_addr_to_string_nz (offset),
1756 pulongest (len), retval,
1757 pulongest (*xfered_len));
1758
1759 if (readbuf)
1760 myaddr = readbuf;
1761 if (writebuf)
1762 myaddr = writebuf;
1763 if (retval == TARGET_XFER_OK && myaddr != NULL)
1764 {
1765 int i;
1766
1767 fputs_unfiltered (", bytes =", gdb_stdlog);
1768 for (i = 0; i < *xfered_len; i++)
1769 {
1770 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1771 {
1772 if (targetdebug < 2 && i > 0)
1773 {
1774 fprintf_unfiltered (gdb_stdlog, " ...");
1775 break;
1776 }
1777 fprintf_unfiltered (gdb_stdlog, "\n");
1778 }
1779
1780 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1781 }
1782 }
1783
1784 fputc_unfiltered ('\n', gdb_stdlog);
1785 }
1786
1787 /* Check implementations of to_xfer_partial update *XFERED_LEN
1788 properly. Do assertion after printing debug messages, so that we
1789 can find more clues on assertion failure from debugging messages. */
1790 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
1791 gdb_assert (*xfered_len > 0);
1792
1793 return retval;
1794 }
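
/* Sketch of the retry loop that higher-level readers (e.g. target_read)
   build on top of target_xfer_partial; simplified, with error handling
   reduced to stopping at the first non-OK status.  The helper name is
   hypothetical.  */

static ULONGEST
example_read_in_pieces (struct target_ops *ops, enum target_object object,
			gdb_byte *buf, ULONGEST offset, ULONGEST len)
{
  ULONGEST xfered = 0;

  while (xfered < len)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status
	= target_xfer_partial (ops, object, NULL, buf + xfered, NULL,
			       offset + xfered, len - xfered, &xfered_len);

      if (status != TARGET_XFER_OK)
	break;			/* EOF, error, or unavailable memory.  */

      xfered += xfered_len;
    }

  return xfered;
}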
1795
1796 /* Read LEN bytes of target memory at address MEMADDR, placing the
1797 results in GDB's memory at MYADDR. Returns either 0 for success or
1798 TARGET_XFER_E_IO if any error occurs.
1799
1800 If an error occurs, no guarantee is made about the contents of the data at
1801 MYADDR. In particular, the caller should not depend upon partial reads
1802 filling the buffer with good data. There is no way for the caller to know
 1803    how much good data might have been transferred anyway.  Callers that can
1804 deal with partial reads should call target_read (which will retry until
1805 it makes no progress, and then return how much was transferred). */
1806
1807 int
1808 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1809 {
1810 /* Dispatch to the topmost target, not the flattened current_target.
1811 Memory accesses check target->to_has_(all_)memory, and the
1812 flattened target doesn't inherit those. */
1813 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1814 myaddr, memaddr, len) == len)
1815 return 0;
1816 else
1817 return TARGET_XFER_E_IO;
1818 }
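
/* Sketch of a caller that needs exactly LEN bytes or nothing
   (hypothetical helper): read a full buffer and decode an integer,
   treating any failure as a hard error.  */

static LONGEST
example_read_integer (CORE_ADDR addr, int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (LONGEST)];

  gdb_assert (len > 0 && len <= (int) sizeof (buf));

  if (target_read_memory (addr, buf, len) != 0)
    error (_("Cannot read integer at %s"), paddress (target_gdbarch (), addr));

  return extract_signed_integer (buf, len, byte_order);
}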
1819
1820 /* Like target_read_memory, but specify explicitly that this is a read
1821 from the target's raw memory. That is, this read bypasses the
1822 dcache, breakpoint shadowing, etc. */
1823
1824 int
1825 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1826 {
1827 /* See comment in target_read_memory about why the request starts at
1828 current_target.beneath. */
1829 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1830 myaddr, memaddr, len) == len)
1831 return 0;
1832 else
1833 return TARGET_XFER_E_IO;
1834 }
1835
1836 /* Like target_read_memory, but specify explicitly that this is a read from
1837 the target's stack. This may trigger different cache behavior. */
1838
1839 int
1840 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1841 {
1842 /* See comment in target_read_memory about why the request starts at
1843 current_target.beneath. */
1844 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1845 myaddr, memaddr, len) == len)
1846 return 0;
1847 else
1848 return TARGET_XFER_E_IO;
1849 }
1850
1851 /* Like target_read_memory, but specify explicitly that this is a read from
1852 the target's code. This may trigger different cache behavior. */
1853
1854 int
1855 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1856 {
1857 /* See comment in target_read_memory about why the request starts at
1858 current_target.beneath. */
1859 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1860 myaddr, memaddr, len) == len)
1861 return 0;
1862 else
1863 return TARGET_XFER_E_IO;
1864 }
1865
1866 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1867 Returns either 0 for success or TARGET_XFER_E_IO if any
1868 error occurs. If an error occurs, no guarantee is made about how
1869 much data got written. Callers that can deal with partial writes
1870 should call target_write. */
1871
1872 int
1873 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1874 {
1875 /* See comment in target_read_memory about why the request starts at
1876 current_target.beneath. */
1877 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1878 myaddr, memaddr, len) == len)
1879 return 0;
1880 else
1881 return TARGET_XFER_E_IO;
1882 }
1883
1884 /* Write LEN bytes from MYADDR to target raw memory at address
1885 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1886 if any error occurs. If an error occurs, no guarantee is made
1887 about how much data got written. Callers that can deal with
1888 partial writes should call target_write. */
1889
1890 int
1891 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1892 {
1893 /* See comment in target_read_memory about why the request starts at
1894 current_target.beneath. */
1895 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1896 myaddr, memaddr, len) == len)
1897 return 0;
1898 else
1899 return TARGET_XFER_E_IO;
1900 }
1901
1902 /* Fetch the target's memory map. */
1903
1904 VEC(mem_region_s) *
1905 target_memory_map (void)
1906 {
1907 VEC(mem_region_s) *result;
1908 struct mem_region *last_one, *this_one;
1909 int ix;
1910 struct target_ops *t;
1911
1912 if (targetdebug)
1913 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1914
1915 for (t = current_target.beneath; t != NULL; t = t->beneath)
1916 if (t->to_memory_map != NULL)
1917 break;
1918
1919 if (t == NULL)
1920 return NULL;
1921
1922 result = t->to_memory_map (t);
1923 if (result == NULL)
1924 return NULL;
1925
1926 qsort (VEC_address (mem_region_s, result),
1927 VEC_length (mem_region_s, result),
1928 sizeof (struct mem_region), mem_region_cmp);
1929
1930 /* Check that regions do not overlap. Simultaneously assign
1931 a numbering for the "mem" commands to use to refer to
1932 each region. */
1933 last_one = NULL;
1934 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1935 {
1936 this_one->number = ix;
1937
1938 if (last_one && last_one->hi > this_one->lo)
1939 {
1940 warning (_("Overlapping regions in memory map: ignoring"));
1941 VEC_free (mem_region_s, result);
1942 return NULL;
1943 }
1944 last_one = this_one;
1945 }
1946
1947 return result;
1948 }
1949
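/* Illustrative sketch (not in the original source): callers walk and then
   free the returned vector, for example:

     VEC(mem_region_s) *map = target_memory_map ();
     struct mem_region *r;
     int ix;

     for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
       printf_filtered ("%s..%s\n", hex_string (r->lo), hex_string (r->hi));
     VEC_free (mem_region_s, map);  */
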
1950 void
1951 target_flash_erase (ULONGEST address, LONGEST length)
1952 {
1953 struct target_ops *t;
1954
1955 for (t = current_target.beneath; t != NULL; t = t->beneath)
1956 if (t->to_flash_erase != NULL)
1957 {
1958 if (targetdebug)
1959 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1960 hex_string (address), phex (length, 0));
1961 t->to_flash_erase (t, address, length);
1962 return;
1963 }
1964
1965 tcomplain ();
1966 }
1967
1968 void
1969 target_flash_done (void)
1970 {
1971 struct target_ops *t;
1972
1973 for (t = current_target.beneath; t != NULL; t = t->beneath)
1974 if (t->to_flash_done != NULL)
1975 {
1976 if (targetdebug)
1977 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1978 t->to_flash_done (t);
1979 return;
1980 }
1981
1982 tcomplain ();
1983 }
1984
1985 static void
1986 show_trust_readonly (struct ui_file *file, int from_tty,
1987 struct cmd_list_element *c, const char *value)
1988 {
1989 fprintf_filtered (file,
1990 _("Mode for reading from readonly sections is %s.\n"),
1991 value);
1992 }
1993
1994 /* More generic transfers. */
1995
1996 static enum target_xfer_status
1997 default_xfer_partial (struct target_ops *ops, enum target_object object,
1998 const char *annex, gdb_byte *readbuf,
1999 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2000 ULONGEST *xfered_len)
2001 {
2002 if (object == TARGET_OBJECT_MEMORY
2003 && ops->deprecated_xfer_memory != NULL)
2004 /* If available, fall back to the target's
2005 "deprecated_xfer_memory" method. */
2006 {
2007 int xfered = -1;
2008
2009 errno = 0;
2010 if (writebuf != NULL)
2011 {
2012 void *buffer = xmalloc (len);
2013 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2014
2015 memcpy (buffer, writebuf, len);
2016 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2017 1/*write*/, NULL, ops);
2018 do_cleanups (cleanup);
2019 }
2020 if (readbuf != NULL)
2021 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2022 0/*read*/, NULL, ops);
2023 if (xfered > 0)
2024 {
2025 *xfered_len = (ULONGEST) xfered;
2026          return TARGET_XFER_OK;
2027 }
2028 else if (xfered == 0 && errno == 0)
2029        /* "deprecated_xfer_memory" uses 0, cross-checked against
2030           ERRNO, as one indication of an error.  */
2031 return TARGET_XFER_EOF;
2032 else
2033 return TARGET_XFER_E_IO;
2034 }
2035 else
2036 {
2037 gdb_assert (ops->beneath != NULL);
2038 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2039 readbuf, writebuf, offset, len,
2040 xfered_len);
2041 }
2042 }
2043
2044 /* Target vector read/write partial wrapper functions. */
2045
2046 static enum target_xfer_status
2047 target_read_partial (struct target_ops *ops,
2048 enum target_object object,
2049 const char *annex, gdb_byte *buf,
2050 ULONGEST offset, ULONGEST len,
2051 ULONGEST *xfered_len)
2052 {
2053 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
2054 xfered_len);
2055 }
2056
2057 static enum target_xfer_status
2058 target_write_partial (struct target_ops *ops,
2059 enum target_object object,
2060 const char *annex, const gdb_byte *buf,
2061 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2062 {
2063 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2064 xfered_len);
2065 }
2066
2067 /* Wrappers to perform the full transfer. */
2068
2069 /* For docs on target_read see target.h. */
2070
2071 LONGEST
2072 target_read (struct target_ops *ops,
2073 enum target_object object,
2074 const char *annex, gdb_byte *buf,
2075 ULONGEST offset, LONGEST len)
2076 {
2077 LONGEST xfered = 0;
2078
2079 while (xfered < len)
2080 {
2081 ULONGEST xfered_len;
2082 enum target_xfer_status status;
2083
2084 status = target_read_partial (ops, object, annex,
2085 (gdb_byte *) buf + xfered,
2086 offset + xfered, len - xfered,
2087 &xfered_len);
2088
2089 /* Call an observer, notifying them of the xfer progress? */
2090 if (status == TARGET_XFER_EOF)
2091 return xfered;
2092 else if (status == TARGET_XFER_OK)
2093 {
2094 xfered += xfered_len;
2095 QUIT;
2096 }
2097 else
2098 return -1;
2099
2100 }
2101 return len;
2102 }
2103
2104 /* Assuming that the entire [begin, end) range of memory cannot be
2105    read, try to read whatever subrange is possible to read.
2106
2107    The function returns, in RESULT, either zero or one memory block.
2108    If there's a readable subrange at the beginning, it is completely
2109    read and returned.  Any further readable subrange will not be read.
2110    Otherwise, if there's a readable subrange at the end, it will be
2111    completely read and returned.  Any readable subranges before it
2112    (obviously, not starting at the beginning) will be ignored.  In all
2113    other cases -- no readable subrange, or readable subrange(s) that
2114    are neither at the beginning nor at the end -- nothing is returned.
2115
2116    The purpose of this function is to handle a read across a boundary
2117    of accessible memory in the case when a memory map is not available.
2118    The above restrictions are fine for this case, but will give
2119    incorrect results if the memory is 'patchy'.  However, supporting
2120    'patchy' memory would require trying to read every single byte, and
2121    that seems an unacceptable solution.  An explicit memory map is
2122    recommended for this case -- read_memory_robust will then take care
2123    of reading multiple ranges.  */
2124
2125 static void
2126 read_whatever_is_readable (struct target_ops *ops,
2127 ULONGEST begin, ULONGEST end,
2128 VEC(memory_read_result_s) **result)
2129 {
2130 gdb_byte *buf = xmalloc (end - begin);
2131 ULONGEST current_begin = begin;
2132 ULONGEST current_end = end;
2133 int forward;
2134 memory_read_result_s r;
2135 ULONGEST xfered_len;
2136
2137 /* If we previously failed to read 1 byte, nothing can be done here. */
2138 if (end - begin <= 1)
2139 {
2140 xfree (buf);
2141 return;
2142 }
2143
2144   /* Check that either the first or the last byte is readable, and give
2145      up if not.  This heuristic is meant to permit reading accessible
2146      memory at the boundary of an accessible region.  */
2147 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2148 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
2149 {
2150 forward = 1;
2151 ++current_begin;
2152 }
2153 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2154 buf + (end-begin) - 1, end - 1, 1,
2155 &xfered_len) == TARGET_XFER_OK)
2156 {
2157 forward = 0;
2158 --current_end;
2159 }
2160 else
2161 {
2162 xfree (buf);
2163 return;
2164 }
2165
2166   /* The loop invariant is that the [current_begin, current_end) range
2167      was previously found to be not readable as a whole.
2168
2169      Note the loop condition -- if the range has only 1 byte, we can't
2170      divide it any further, so there's no point trying.  */
2171 while (current_end - current_begin > 1)
2172 {
2173 ULONGEST first_half_begin, first_half_end;
2174 ULONGEST second_half_begin, second_half_end;
2175 LONGEST xfer;
2176 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2177
2178 if (forward)
2179 {
2180 first_half_begin = current_begin;
2181 first_half_end = middle;
2182 second_half_begin = middle;
2183 second_half_end = current_end;
2184 }
2185 else
2186 {
2187 first_half_begin = middle;
2188 first_half_end = current_end;
2189 second_half_begin = current_begin;
2190 second_half_end = middle;
2191 }
2192
2193 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2194 buf + (first_half_begin - begin),
2195 first_half_begin,
2196 first_half_end - first_half_begin);
2197
2198 if (xfer == first_half_end - first_half_begin)
2199 {
2200 /* This half reads up fine. So, the error must be in the
2201 other half. */
2202 current_begin = second_half_begin;
2203 current_end = second_half_end;
2204 }
2205 else
2206 {
2207 /* This half is not readable. Because we've tried one byte, we
2208        know some part of this half is actually readable.  Go to the next
2209 iteration to divide again and try to read.
2210
2211 We don't handle the other half, because this function only tries
2212 to read a single readable subrange. */
2213 current_begin = first_half_begin;
2214 current_end = first_half_end;
2215 }
2216 }
2217
2218 if (forward)
2219 {
2220 /* The [begin, current_begin) range has been read. */
2221 r.begin = begin;
2222 r.end = current_begin;
2223 r.data = buf;
2224 }
2225 else
2226 {
2227 /* The [current_end, end) range has been read. */
2228 LONGEST rlen = end - current_end;
2229
2230 r.data = xmalloc (rlen);
2231 memcpy (r.data, buf + current_end - begin, rlen);
2232 r.begin = current_end;
2233 r.end = end;
2234 xfree (buf);
2235 }
2236   VEC_safe_push (memory_read_result_s, (*result), &r);
2237 }
2238
2239 void
2240 free_memory_read_result_vector (void *x)
2241 {
2242 VEC(memory_read_result_s) *v = x;
2243 memory_read_result_s *current;
2244 int ix;
2245
2246 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2247 {
2248 xfree (current->data);
2249 }
2250 VEC_free (memory_read_result_s, v);
2251 }
2252
2253 VEC(memory_read_result_s) *
2254 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2255 {
2256 VEC(memory_read_result_s) *result = 0;
2257
2258 LONGEST xfered = 0;
2259 while (xfered < len)
2260 {
2261 struct mem_region *region = lookup_mem_region (offset + xfered);
2262 LONGEST rlen;
2263
2264 /* If there is no explicit region, a fake one should be created. */
2265 gdb_assert (region);
2266
2267 if (region->hi == 0)
2268 rlen = len - xfered;
2269 else
2270        rlen = region->hi - (offset + xfered);
2271
2272 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2273 {
2274 /* Cannot read this region. Note that we can end up here only
2275 if the region is explicitly marked inaccessible, or
2276 'inaccessible-by-default' is in effect. */
2277 xfered += rlen;
2278 }
2279 else
2280 {
2281 LONGEST to_read = min (len - xfered, rlen);
2282          gdb_byte *buffer = (gdb_byte *) xmalloc (to_read);
2283
2284 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2285 (gdb_byte *) buffer,
2286 offset + xfered, to_read);
2287 /* Call an observer, notifying them of the xfer progress? */
2288 if (xfer <= 0)
2289 {
2290 /* Got an error reading full chunk. See if maybe we can read
2291 some subrange. */
2292 xfree (buffer);
2293 read_whatever_is_readable (ops, offset + xfered,
2294 offset + xfered + to_read, &result);
2295 xfered += to_read;
2296 }
2297 else
2298 {
2299 struct memory_read_result r;
2300 r.data = buffer;
2301 r.begin = offset + xfered;
2302 r.end = r.begin + xfer;
2303 VEC_safe_push (memory_read_result_s, result, &r);
2304 xfered += xfer;
2305 }
2306 QUIT;
2307 }
2308 }
2309 return result;
2310 }
2311
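/* Illustrative sketch (ADDR and LEN are assumed to come from the caller):
   pairing read_memory_robust with the free routine above so that partially
   collected blocks are released even on error paths:

     VEC(memory_read_result_s) *available
       = read_memory_robust (current_target.beneath, addr, len);
     struct cleanup *old_chain
       = make_cleanup (free_memory_read_result_vector, available);

     ... consume the readable [begin, end) blocks in AVAILABLE ...

     do_cleanups (old_chain);  */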
2312
2313 /* An alternative to target_write with progress callbacks. */
2314
2315 LONGEST
2316 target_write_with_progress (struct target_ops *ops,
2317 enum target_object object,
2318 const char *annex, const gdb_byte *buf,
2319 ULONGEST offset, LONGEST len,
2320 void (*progress) (ULONGEST, void *), void *baton)
2321 {
2322 LONGEST xfered = 0;
2323
2324 /* Give the progress callback a chance to set up. */
2325 if (progress)
2326 (*progress) (0, baton);
2327
2328 while (xfered < len)
2329 {
2330 ULONGEST xfered_len;
2331 enum target_xfer_status status;
2332
2333 status = target_write_partial (ops, object, annex,
2334 (gdb_byte *) buf + xfered,
2335 offset + xfered, len - xfered,
2336 &xfered_len);
2337
2338 if (status == TARGET_XFER_EOF)
2339 return xfered;
2340 if (TARGET_XFER_STATUS_ERROR_P (status))
2341 return -1;
2342
2343 gdb_assert (status == TARGET_XFER_OK);
2344 if (progress)
2345 (*progress) (xfered_len, baton);
2346
2347 xfered += xfered_len;
2348 QUIT;
2349 }
2350 return len;
2351 }
2352
2353 /* For docs on target_write see target.h. */
2354
2355 LONGEST
2356 target_write (struct target_ops *ops,
2357 enum target_object object,
2358 const char *annex, const gdb_byte *buf,
2359 ULONGEST offset, LONGEST len)
2360 {
2361 return target_write_with_progress (ops, object, annex, buf, offset, len,
2362 NULL, NULL);
2363 }
2364
2365 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2366 the size of the transferred data. PADDING additional bytes are
2367 available in *BUF_P. This is a helper function for
2368 target_read_alloc; see the declaration of that function for more
2369 information. */
2370
2371 static LONGEST
2372 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2373 const char *annex, gdb_byte **buf_p, int padding)
2374 {
2375 size_t buf_alloc, buf_pos;
2376 gdb_byte *buf;
2377
2378 /* This function does not have a length parameter; it reads the
2379    entire OBJECT.  Also, it doesn't support objects fetched partly
2380 from one target and partly from another (in a different stratum,
2381 e.g. a core file and an executable). Both reasons make it
2382 unsuitable for reading memory. */
2383 gdb_assert (object != TARGET_OBJECT_MEMORY);
2384
2385 /* Start by reading up to 4K at a time. The target will throttle
2386 this number down if necessary. */
2387 buf_alloc = 4096;
2388 buf = xmalloc (buf_alloc);
2389 buf_pos = 0;
2390 while (1)
2391 {
2392 ULONGEST xfered_len;
2393 enum target_xfer_status status;
2394
2395 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2396 buf_pos, buf_alloc - buf_pos - padding,
2397 &xfered_len);
2398
2399 if (status == TARGET_XFER_EOF)
2400 {
2401 /* Read all there was. */
2402 if (buf_pos == 0)
2403 xfree (buf);
2404 else
2405 *buf_p = buf;
2406 return buf_pos;
2407 }
2408 else if (status != TARGET_XFER_OK)
2409 {
2410 /* An error occurred. */
2411 xfree (buf);
2412 return TARGET_XFER_E_IO;
2413 }
2414
2415 buf_pos += xfered_len;
2416
2417 /* If the buffer is filling up, expand it. */
2418 if (buf_alloc < buf_pos * 2)
2419 {
2420 buf_alloc *= 2;
2421 buf = xrealloc (buf, buf_alloc);
2422 }
2423
2424 QUIT;
2425 }
2426 }
2427
2428 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2429    the size of the transferred data.  See the declaration of this
2430    function in "target.h" for more information about the return value.  */
2431
2432 LONGEST
2433 target_read_alloc (struct target_ops *ops, enum target_object object,
2434 const char *annex, gdb_byte **buf_p)
2435 {
2436 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2437 }
2438
2439 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2440 returned as a string, allocated using xmalloc. If an error occurs
2441 or the transfer is unsupported, NULL is returned. Empty objects
2442 are returned as allocated but empty strings. A warning is issued
2443 if the result contains any embedded NUL bytes. */
2444
2445 char *
2446 target_read_stralloc (struct target_ops *ops, enum target_object object,
2447 const char *annex)
2448 {
2449 gdb_byte *buffer;
2450 char *bufstr;
2451 LONGEST i, transferred;
2452
2453 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2454 bufstr = (char *) buffer;
2455
2456 if (transferred < 0)
2457 return NULL;
2458
2459 if (transferred == 0)
2460 return xstrdup ("");
2461
2462 bufstr[transferred] = 0;
2463
2464 /* Check for embedded NUL bytes; but allow trailing NULs. */
2465 for (i = strlen (bufstr); i < transferred; i++)
2466 if (bufstr[i] != 0)
2467 {
2468 warning (_("target object %d, annex %s, "
2469 "contained unexpected null characters"),
2470 (int) object, annex ? annex : "(none)");
2471 break;
2472 }
2473
2474 return bufstr;
2475 }
2476
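/* Illustrative usage (TARGET_OBJECT_OSDATA with the "processes" annex is
   just one plausible combination, mirroring target_get_osdata below):

     char *data = target_read_stralloc (current_target.beneath,
                                        TARGET_OBJECT_OSDATA, "processes");

     if (data != NULL)
       {
         struct cleanup *back_to = make_cleanup (xfree, data);

         ... parse DATA ...
         do_cleanups (back_to);
       }  */
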
2477 /* Memory transfer methods. */
2478
2479 void
2480 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2481 LONGEST len)
2482 {
2483 /* This method is used to read from an alternate, non-current
2484 target. This read must bypass the overlay support (as symbols
2485 don't match this target), and GDB's internal cache (wrong cache
2486 for this target). */
2487 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2488 != len)
2489 memory_error (TARGET_XFER_E_IO, addr);
2490 }
2491
2492 ULONGEST
2493 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2494 int len, enum bfd_endian byte_order)
2495 {
2496 gdb_byte buf[sizeof (ULONGEST)];
2497
2498 gdb_assert (len <= sizeof (buf));
2499 get_target_memory (ops, addr, buf, len);
2500 return extract_unsigned_integer (buf, len, byte_order);
2501 }
2502
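/* Illustrative example (OPS and ADDR are assumed to be supplied by the
   caller): fetching a 4-byte big-endian word from that target:

     ULONGEST val
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_BIG);  */
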
2503 /* See target.h. */
2504
2505 int
2506 target_insert_breakpoint (struct gdbarch *gdbarch,
2507 struct bp_target_info *bp_tgt)
2508 {
2509 if (!may_insert_breakpoints)
2510 {
2511 warning (_("May not insert breakpoints"));
2512 return 1;
2513 }
2514
2515 return current_target.to_insert_breakpoint (&current_target,
2516 gdbarch, bp_tgt);
2517 }
2518
2519 /* See target.h. */
2520
2521 int
2522 target_remove_breakpoint (struct gdbarch *gdbarch,
2523 struct bp_target_info *bp_tgt)
2524 {
2525 /* This is kind of a weird case to handle, but the permission might
2526 have been changed after breakpoints were inserted - in which case
2527 we should just take the user literally and assume that any
2528 breakpoints should be left in place. */
2529 if (!may_insert_breakpoints)
2530 {
2531 warning (_("May not remove breakpoints"));
2532 return 1;
2533 }
2534
2535 return current_target.to_remove_breakpoint (&current_target,
2536 gdbarch, bp_tgt);
2537 }
2538
2539 static void
2540 target_info (char *args, int from_tty)
2541 {
2542 struct target_ops *t;
2543 int has_all_mem = 0;
2544
2545 if (symfile_objfile != NULL)
2546 printf_unfiltered (_("Symbols from \"%s\".\n"),
2547 objfile_name (symfile_objfile));
2548
2549 for (t = target_stack; t != NULL; t = t->beneath)
2550 {
2551 if (!(*t->to_has_memory) (t))
2552 continue;
2553
2554 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2555 continue;
2556 if (has_all_mem)
2557 printf_unfiltered (_("\tWhile running this, "
2558 "GDB does not access memory from...\n"));
2559 printf_unfiltered ("%s:\n", t->to_longname);
2560 (t->to_files_info) (t);
2561 has_all_mem = (*t->to_has_all_memory) (t);
2562 }
2563 }
2564
2565 /* This function is called before any new inferior is created, e.g.
2566 by running a program, attaching, or connecting to a target.
2567 It cleans up any state from previous invocations which might
2568 change between runs. This is a subset of what target_preopen
2569 resets (things which might change between targets). */
2570
2571 void
2572 target_pre_inferior (int from_tty)
2573 {
2574 /* Clear out solib state. Otherwise the solib state of the previous
2575 inferior might have survived and is entirely wrong for the new
2576 target. This has been observed on GNU/Linux using glibc 2.3. How
2577 to reproduce:
2578
2579 bash$ ./foo&
2580 [1] 4711
2581 bash$ ./foo&
2582 [1] 4712
2583 bash$ gdb ./foo
2584 [...]
2585 (gdb) attach 4711
2586 (gdb) detach
2587 (gdb) attach 4712
2588 Cannot access memory at address 0xdeadbeef
2589 */
2590
2591 /* In some OSs, the shared library list is the same/global/shared
2592 across inferiors. If code is shared between processes, so are
2593 memory regions and features. */
2594 if (!gdbarch_has_global_solist (target_gdbarch ()))
2595 {
2596 no_shared_libraries (NULL, from_tty);
2597
2598 invalidate_target_mem_regions ();
2599
2600 target_clear_description ();
2601 }
2602
2603 agent_capability_invalidate ();
2604 }
2605
2606 /* Callback for iterate_over_inferiors. Gets rid of the given
2607 inferior. */
2608
2609 static int
2610 dispose_inferior (struct inferior *inf, void *args)
2611 {
2612 struct thread_info *thread;
2613
2614 thread = any_thread_of_process (inf->pid);
2615 if (thread)
2616 {
2617 switch_to_thread (thread->ptid);
2618
2619 /* Core inferiors actually should be detached, not killed. */
2620 if (target_has_execution)
2621 target_kill ();
2622 else
2623 target_detach (NULL, 0);
2624 }
2625
2626 return 0;
2627 }
2628
2629 /* This is to be called by the open routine before it does
2630 anything. */
2631
2632 void
2633 target_preopen (int from_tty)
2634 {
2635 dont_repeat ();
2636
2637 if (have_inferiors ())
2638 {
2639 if (!from_tty
2640 || !have_live_inferiors ()
2641 || query (_("A program is being debugged already. Kill it? ")))
2642 iterate_over_inferiors (dispose_inferior, NULL);
2643 else
2644 error (_("Program not killed."));
2645 }
2646
2647 /* Calling target_kill may remove the target from the stack. But if
2648 it doesn't (which seems like a win for UDI), remove it now. */
2649 /* Leave the exec target, though. The user may be switching from a
2650 live process to a core of the same program. */
2651 pop_all_targets_above (file_stratum);
2652
2653 target_pre_inferior (from_tty);
2654 }
2655
2656 /* Detach a target after doing deferred register stores. */
2657
2658 void
2659 target_detach (const char *args, int from_tty)
2660 {
2661 struct target_ops* t;
2662
2663 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2664 /* Don't remove global breakpoints here. They're removed on
2665 disconnection from the target. */
2666 ;
2667 else
2668 /* If we're in breakpoints-always-inserted mode, have to remove
2669 them before detaching. */
2670 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2671
2672 prepare_for_detach ();
2673
2674 for (t = current_target.beneath; t != NULL; t = t->beneath)
2675 {
2676 if (t->to_detach != NULL)
2677 {
2678 t->to_detach (t, args, from_tty);
2679 if (targetdebug)
2680 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2681 args, from_tty);
2682 return;
2683 }
2684 }
2685
2686 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2687 }
2688
2689 void
2690 target_disconnect (char *args, int from_tty)
2691 {
2692 struct target_ops *t;
2693
2694 /* If we're in breakpoints-always-inserted mode or if breakpoints
2695 are global across processes, we have to remove them before
2696 disconnecting. */
2697 remove_breakpoints ();
2698
2699 for (t = current_target.beneath; t != NULL; t = t->beneath)
2700 if (t->to_disconnect != NULL)
2701 {
2702 if (targetdebug)
2703 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2704 args, from_tty);
2705 t->to_disconnect (t, args, from_tty);
2706 return;
2707 }
2708
2709 tcomplain ();
2710 }
2711
2712 ptid_t
2713 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2714 {
2715 struct target_ops *t;
2716 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2717 status, options);
2718
2719 if (targetdebug)
2720 {
2721 char *status_string;
2722 char *options_string;
2723
2724 status_string = target_waitstatus_to_string (status);
2725 options_string = target_options_to_string (options);
2726 fprintf_unfiltered (gdb_stdlog,
2727 "target_wait (%d, status, options={%s})"
2728 " = %d, %s\n",
2729 ptid_get_pid (ptid), options_string,
2730 ptid_get_pid (retval), status_string);
2731 xfree (status_string);
2732 xfree (options_string);
2733 }
2734
2735 return retval;
2736 }
2737
2738 char *
2739 target_pid_to_str (ptid_t ptid)
2740 {
2741 struct target_ops *t;
2742
2743 for (t = current_target.beneath; t != NULL; t = t->beneath)
2744 {
2745 if (t->to_pid_to_str != NULL)
2746 return (*t->to_pid_to_str) (t, ptid);
2747 }
2748
2749 return normal_pid_to_str (ptid);
2750 }
2751
2752 char *
2753 target_thread_name (struct thread_info *info)
2754 {
2755 struct target_ops *t;
2756
2757 for (t = current_target.beneath; t != NULL; t = t->beneath)
2758 {
2759 if (t->to_thread_name != NULL)
2760 return (*t->to_thread_name) (info);
2761 }
2762
2763 return NULL;
2764 }
2765
2766 void
2767 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2768 {
2769 struct target_ops *t;
2770
2771 target_dcache_invalidate ();
2772
2773 current_target.to_resume (&current_target, ptid, step, signal);
2774 if (targetdebug)
2775 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2776 ptid_get_pid (ptid),
2777 step ? "step" : "continue",
2778 gdb_signal_to_name (signal));
2779
2780 registers_changed_ptid (ptid);
2781 set_executing (ptid, 1);
2782 set_running (ptid, 1);
2783 clear_inline_frame_state (ptid);
2784 }
2785
2786 void
2787 target_pass_signals (int numsigs, unsigned char *pass_signals)
2788 {
2789 struct target_ops *t;
2790
2791 for (t = current_target.beneath; t != NULL; t = t->beneath)
2792 {
2793 if (t->to_pass_signals != NULL)
2794 {
2795 if (targetdebug)
2796 {
2797 int i;
2798
2799 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2800 numsigs);
2801
2802 for (i = 0; i < numsigs; i++)
2803 if (pass_signals[i])
2804 fprintf_unfiltered (gdb_stdlog, " %s",
2805 gdb_signal_to_name (i));
2806
2807 fprintf_unfiltered (gdb_stdlog, " })\n");
2808 }
2809
2810 (*t->to_pass_signals) (numsigs, pass_signals);
2811 return;
2812 }
2813 }
2814 }
2815
2816 void
2817 target_program_signals (int numsigs, unsigned char *program_signals)
2818 {
2819 struct target_ops *t;
2820
2821 for (t = current_target.beneath; t != NULL; t = t->beneath)
2822 {
2823 if (t->to_program_signals != NULL)
2824 {
2825 if (targetdebug)
2826 {
2827 int i;
2828
2829 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2830 numsigs);
2831
2832 for (i = 0; i < numsigs; i++)
2833 if (program_signals[i])
2834 fprintf_unfiltered (gdb_stdlog, " %s",
2835 gdb_signal_to_name (i));
2836
2837 fprintf_unfiltered (gdb_stdlog, " })\n");
2838 }
2839
2840 (*t->to_program_signals) (numsigs, program_signals);
2841 return;
2842 }
2843 }
2844 }
2845
2846 /* Look through the list of possible targets for a target that can
2847 follow forks. */
2848
2849 int
2850 target_follow_fork (int follow_child, int detach_fork)
2851 {
2852 struct target_ops *t;
2853
2854 for (t = current_target.beneath; t != NULL; t = t->beneath)
2855 {
2856 if (t->to_follow_fork != NULL)
2857 {
2858 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2859
2860 if (targetdebug)
2861 fprintf_unfiltered (gdb_stdlog,
2862 "target_follow_fork (%d, %d) = %d\n",
2863 follow_child, detach_fork, retval);
2864 return retval;
2865 }
2866 }
2867
2868 /* Some target returned a fork event, but did not know how to follow it. */
2869 internal_error (__FILE__, __LINE__,
2870 _("could not find a target to follow fork"));
2871 }
2872
2873 void
2874 target_mourn_inferior (void)
2875 {
2876 struct target_ops *t;
2877
2878 for (t = current_target.beneath; t != NULL; t = t->beneath)
2879 {
2880 if (t->to_mourn_inferior != NULL)
2881 {
2882 t->to_mourn_inferior (t);
2883 if (targetdebug)
2884 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2885
2886 /* We no longer need to keep handles on any of the object files.
2887 Make sure to release them to avoid unnecessarily locking any
2888 of them while we're not actually debugging. */
2889 bfd_cache_close_all ();
2890
2891 return;
2892 }
2893 }
2894
2895 internal_error (__FILE__, __LINE__,
2896                   _("could not find a target to mourn the inferior"));
2897 }
2898
2899 /* Look for a target which can describe architectural features, starting
2900 from TARGET. If we find one, return its description. */
2901
2902 const struct target_desc *
2903 target_read_description (struct target_ops *target)
2904 {
2905 struct target_ops *t;
2906
2907 for (t = target; t != NULL; t = t->beneath)
2908 if (t->to_read_description != NULL)
2909 {
2910 const struct target_desc *tdesc;
2911
2912 tdesc = t->to_read_description (t);
2913 if (tdesc)
2914 return tdesc;
2915 }
2916
2917 return NULL;
2918 }
2919
2920 /* The default implementation of to_search_memory.
2921 This implements a basic search of memory, reading target memory and
2922    performing the search here (as opposed to performing the search on the
2923 target side with, for example, gdbserver). */
2924
2925 int
2926 simple_search_memory (struct target_ops *ops,
2927 CORE_ADDR start_addr, ULONGEST search_space_len,
2928 const gdb_byte *pattern, ULONGEST pattern_len,
2929 CORE_ADDR *found_addrp)
2930 {
2931 /* NOTE: also defined in find.c testcase. */
2932 #define SEARCH_CHUNK_SIZE 16000
2933 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2934 /* Buffer to hold memory contents for searching. */
2935 gdb_byte *search_buf;
2936 unsigned search_buf_size;
2937 struct cleanup *old_cleanups;
2938
2939 search_buf_size = chunk_size + pattern_len - 1;
2940
2941 /* No point in trying to allocate a buffer larger than the search space. */
2942 if (search_space_len < search_buf_size)
2943 search_buf_size = search_space_len;
2944
2945 search_buf = malloc (search_buf_size);
2946 if (search_buf == NULL)
2947 error (_("Unable to allocate memory to perform the search."));
2948 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2949
2950 /* Prime the search buffer. */
2951
2952 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2953 search_buf, start_addr, search_buf_size) != search_buf_size)
2954 {
2955 warning (_("Unable to access %s bytes of target "
2956 "memory at %s, halting search."),
2957 pulongest (search_buf_size), hex_string (start_addr));
2958 do_cleanups (old_cleanups);
2959 return -1;
2960 }
2961
2962 /* Perform the search.
2963
2964 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2965 When we've scanned N bytes we copy the trailing bytes to the start and
2966 read in another N bytes. */
2967
2968 while (search_space_len >= pattern_len)
2969 {
2970 gdb_byte *found_ptr;
2971 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2972
2973 found_ptr = memmem (search_buf, nr_search_bytes,
2974 pattern, pattern_len);
2975
2976 if (found_ptr != NULL)
2977 {
2978 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2979
2980 *found_addrp = found_addr;
2981 do_cleanups (old_cleanups);
2982 return 1;
2983 }
2984
2985 /* Not found in this chunk, skip to next chunk. */
2986
2987 /* Don't let search_space_len wrap here, it's unsigned. */
2988 if (search_space_len >= chunk_size)
2989 search_space_len -= chunk_size;
2990 else
2991 search_space_len = 0;
2992
2993 if (search_space_len >= pattern_len)
2994 {
2995 unsigned keep_len = search_buf_size - chunk_size;
2996 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2997 int nr_to_read;
2998
2999 /* Copy the trailing part of the previous iteration to the front
3000 of the buffer for the next iteration. */
3001 gdb_assert (keep_len == pattern_len - 1);
3002 memcpy (search_buf, search_buf + chunk_size, keep_len);
3003
3004 nr_to_read = min (search_space_len - keep_len, chunk_size);
3005
3006 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3007 search_buf + keep_len, read_addr,
3008 nr_to_read) != nr_to_read)
3009 {
3010 warning (_("Unable to access %s bytes of target "
3011 "memory at %s, halting search."),
3012 plongest (nr_to_read),
3013 hex_string (read_addr));
3014 do_cleanups (old_cleanups);
3015 return -1;
3016 }
3017
3018 start_addr += chunk_size;
3019 }
3020 }
3021
3022 /* Not found. */
3023
3024 do_cleanups (old_cleanups);
3025 return 0;
3026 }
3027
3028 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3029 sequence of bytes in PATTERN with length PATTERN_LEN.
3030
3031 The result is 1 if found, 0 if not found, and -1 if there was an error
3032 requiring halting of the search (e.g. memory read error).
3033 If the pattern is found the address is recorded in FOUND_ADDRP. */
3034
3035 int
3036 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3037 const gdb_byte *pattern, ULONGEST pattern_len,
3038 CORE_ADDR *found_addrp)
3039 {
3040 struct target_ops *t;
3041 int found;
3042
3043 /* We don't use INHERIT to set current_target.to_search_memory,
3044 so we have to scan the target stack and handle targetdebug
3045 ourselves. */
3046
3047 if (targetdebug)
3048 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3049 hex_string (start_addr));
3050
3051 for (t = current_target.beneath; t != NULL; t = t->beneath)
3052 if (t->to_search_memory != NULL)
3053 break;
3054
3055 if (t != NULL)
3056 {
3057 found = t->to_search_memory (t, start_addr, search_space_len,
3058 pattern, pattern_len, found_addrp);
3059 }
3060 else
3061 {
3062 /* If a special version of to_search_memory isn't available, use the
3063 simple version. */
3064 found = simple_search_memory (current_target.beneath,
3065 start_addr, search_space_len,
3066 pattern, pattern_len, found_addrp);
3067 }
3068
3069 if (targetdebug)
3070 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3071
3072 return found;
3073 }
3074
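/* Illustrative caller (the values are hypothetical): scanning the first
   64 KiB from START for an ELF magic number:

     static const gdb_byte magic[] = { 0x7f, 'E', 'L', 'F' };
     CORE_ADDR found_addr;

     if (target_search_memory (start, 0x10000, magic, sizeof magic,
                               &found_addr) == 1)
       printf_filtered (_("pattern found at %s\n"), hex_string (found_addr));  */
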
3075 /* Look through the currently pushed targets. If none of them will
3076 be able to restart the currently running process, issue an error
3077 message. */
3078
3079 void
3080 target_require_runnable (void)
3081 {
3082 struct target_ops *t;
3083
3084 for (t = target_stack; t != NULL; t = t->beneath)
3085 {
3086 /* If this target knows how to create a new program, then
3087 assume we will still be able to after killing the current
3088 one. Either killing and mourning will not pop T, or else
3089 find_default_run_target will find it again. */
3090 if (t->to_create_inferior != NULL)
3091 return;
3092
3093       /* Do not worry about thread_stratum targets that cannot
3094 create inferiors. Assume they will be pushed again if
3095 necessary, and continue to the process_stratum. */
3096 if (t->to_stratum == thread_stratum
3097 || t->to_stratum == arch_stratum)
3098 continue;
3099
3100 error (_("The \"%s\" target does not support \"run\". "
3101 "Try \"help target\" or \"continue\"."),
3102 t->to_shortname);
3103 }
3104
3105 /* This function is only called if the target is running. In that
3106 case there should have been a process_stratum target and it
3107 should either know how to create inferiors, or not... */
3108 internal_error (__FILE__, __LINE__, _("No targets found"));
3109 }
3110
3111 /* Look through the list of possible targets for a target that can
3112 execute a run or attach command without any other data. This is
3113 used to locate the default process stratum.
3114
3115 If DO_MESG is not NULL, the result is always valid (error() is
3116 called for errors); else, return NULL on error. */
3117
3118 static struct target_ops *
3119 find_default_run_target (char *do_mesg)
3120 {
3121 struct target_ops **t;
3122 struct target_ops *runable = NULL;
3123 int count;
3124
3125 count = 0;
3126
3127 for (t = target_structs; t < target_structs + target_struct_size;
3128 ++t)
3129 {
3130 if ((*t)->to_can_run && target_can_run (*t))
3131 {
3132 runable = *t;
3133 ++count;
3134 }
3135 }
3136
3137 if (count != 1)
3138 {
3139 if (do_mesg)
3140 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3141 else
3142 return NULL;
3143 }
3144
3145 return runable;
3146 }
3147
3148 void
3149 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3150 {
3151 struct target_ops *t;
3152
3153 t = find_default_run_target ("attach");
3154 (t->to_attach) (t, args, from_tty);
3155 return;
3156 }
3157
3158 void
3159 find_default_create_inferior (struct target_ops *ops,
3160 char *exec_file, char *allargs, char **env,
3161 int from_tty)
3162 {
3163 struct target_ops *t;
3164
3165 t = find_default_run_target ("run");
3166 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3167 return;
3168 }
3169
3170 static int
3171 find_default_can_async_p (struct target_ops *ignore)
3172 {
3173 struct target_ops *t;
3174
3175 /* This may be called before the target is pushed on the stack;
3176 look for the default process stratum. If there's none, gdb isn't
3177 configured with a native debugger, and target remote isn't
3178 connected yet. */
3179 t = find_default_run_target (NULL);
3180 if (t && t->to_can_async_p != delegate_can_async_p)
3181 return (t->to_can_async_p) (t);
3182 return 0;
3183 }
3184
3185 static int
3186 find_default_is_async_p (struct target_ops *ignore)
3187 {
3188 struct target_ops *t;
3189
3190 /* This may be called before the target is pushed on the stack;
3191 look for the default process stratum. If there's none, gdb isn't
3192 configured with a native debugger, and target remote isn't
3193 connected yet. */
3194 t = find_default_run_target (NULL);
3195 if (t && t->to_is_async_p != delegate_is_async_p)
3196 return (t->to_is_async_p) (t);
3197 return 0;
3198 }
3199
3200 static int
3201 find_default_supports_non_stop (void)
3202 {
3203 struct target_ops *t;
3204
3205 t = find_default_run_target (NULL);
3206 if (t && t->to_supports_non_stop)
3207 return (t->to_supports_non_stop) ();
3208 return 0;
3209 }
3210
3211 int
3212 target_supports_non_stop (void)
3213 {
3214 struct target_ops *t;
3215
3216 for (t = &current_target; t != NULL; t = t->beneath)
3217 if (t->to_supports_non_stop)
3218 return t->to_supports_non_stop ();
3219
3220 return 0;
3221 }
3222
3223 /* Implement the "info proc" command. */
3224
3225 int
3226 target_info_proc (char *args, enum info_proc_what what)
3227 {
3228 struct target_ops *t;
3229
3230 /* If we're already connected to something that can get us OS
3231 related data, use it. Otherwise, try using the native
3232 target. */
3233 if (current_target.to_stratum >= process_stratum)
3234 t = current_target.beneath;
3235 else
3236 t = find_default_run_target (NULL);
3237
3238 for (; t != NULL; t = t->beneath)
3239 {
3240 if (t->to_info_proc != NULL)
3241 {
3242 t->to_info_proc (t, args, what);
3243
3244 if (targetdebug)
3245 fprintf_unfiltered (gdb_stdlog,
3246 "target_info_proc (\"%s\", %d)\n", args, what);
3247
3248 return 1;
3249 }
3250 }
3251
3252 return 0;
3253 }
3254
3255 static int
3256 find_default_supports_disable_randomization (void)
3257 {
3258 struct target_ops *t;
3259
3260 t = find_default_run_target (NULL);
3261 if (t && t->to_supports_disable_randomization)
3262 return (t->to_supports_disable_randomization) ();
3263 return 0;
3264 }
3265
3266 int
3267 target_supports_disable_randomization (void)
3268 {
3269 struct target_ops *t;
3270
3271 for (t = &current_target; t != NULL; t = t->beneath)
3272 if (t->to_supports_disable_randomization)
3273 return t->to_supports_disable_randomization ();
3274
3275 return 0;
3276 }
3277
3278 char *
3279 target_get_osdata (const char *type)
3280 {
3281 struct target_ops *t;
3282
3283 /* If we're already connected to something that can get us OS
3284 related data, use it. Otherwise, try using the native
3285 target. */
3286 if (current_target.to_stratum >= process_stratum)
3287 t = current_target.beneath;
3288 else
3289 t = find_default_run_target ("get OS data");
3290
3291 if (!t)
3292 return NULL;
3293
3294 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3295 }
3296
3297 /* Determine the current address space of thread PTID. */
3298
3299 struct address_space *
3300 target_thread_address_space (ptid_t ptid)
3301 {
3302 struct address_space *aspace;
3303 struct inferior *inf;
3304 struct target_ops *t;
3305
3306 for (t = current_target.beneath; t != NULL; t = t->beneath)
3307 {
3308 if (t->to_thread_address_space != NULL)
3309 {
3310 aspace = t->to_thread_address_space (t, ptid);
3311 gdb_assert (aspace);
3312
3313 if (targetdebug)
3314 fprintf_unfiltered (gdb_stdlog,
3315 "target_thread_address_space (%s) = %d\n",
3316 target_pid_to_str (ptid),
3317 address_space_num (aspace));
3318 return aspace;
3319 }
3320 }
3321
3322 /* Fall-back to the "main" address space of the inferior. */
3323 inf = find_inferior_pid (ptid_get_pid (ptid));
3324
3325 if (inf == NULL || inf->aspace == NULL)
3326 internal_error (__FILE__, __LINE__,
3327 _("Can't determine the current "
3328 "address space of thread %s\n"),
3329 target_pid_to_str (ptid));
3330
3331 return inf->aspace;
3332 }
3333
3334
3335 /* Target file operations. */
3336
3337 static struct target_ops *
3338 default_fileio_target (void)
3339 {
3340 /* If we're already connected to something that can perform
3341 file I/O, use it. Otherwise, try using the native target. */
3342 if (current_target.to_stratum >= process_stratum)
3343 return current_target.beneath;
3344 else
3345 return find_default_run_target ("file I/O");
3346 }
3347
3348 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3349 target file descriptor, or -1 if an error occurs (and set
3350 *TARGET_ERRNO). */
3351 int
3352 target_fileio_open (const char *filename, int flags, int mode,
3353 int *target_errno)
3354 {
3355 struct target_ops *t;
3356
3357 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3358 {
3359 if (t->to_fileio_open != NULL)
3360 {
3361 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3362
3363 if (targetdebug)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3366 filename, flags, mode,
3367 fd, fd != -1 ? 0 : *target_errno);
3368 return fd;
3369 }
3370 }
3371
3372 *target_errno = FILEIO_ENOSYS;
3373 return -1;
3374 }
3375
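/* Illustrative sketch (the file name is hypothetical): opening a target
   file read-only and arranging for it to be closed through the cleanup
   helper defined further below:

     int target_errno;
     int fd = target_fileio_open ("/etc/motd", FILEIO_O_RDONLY, 0,
                                  &target_errno);

     if (fd != -1)
       make_cleanup (target_fileio_close_cleanup, &fd);  */
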
3376 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3377 Return the number of bytes written, or -1 if an error occurs
3378 (and set *TARGET_ERRNO). */
3379 int
3380 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3381 ULONGEST offset, int *target_errno)
3382 {
3383 struct target_ops *t;
3384
3385 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3386 {
3387 if (t->to_fileio_pwrite != NULL)
3388 {
3389 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3390 target_errno);
3391
3392 if (targetdebug)
3393 fprintf_unfiltered (gdb_stdlog,
3394 "target_fileio_pwrite (%d,...,%d,%s) "
3395 "= %d (%d)\n",
3396 fd, len, pulongest (offset),
3397 ret, ret != -1 ? 0 : *target_errno);
3398 return ret;
3399 }
3400 }
3401
3402 *target_errno = FILEIO_ENOSYS;
3403 return -1;
3404 }
3405
3406 /* Read up to LEN bytes from FD on the target into READ_BUF.
3407 Return the number of bytes read, or -1 if an error occurs
3408 (and set *TARGET_ERRNO). */
3409 int
3410 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3411 ULONGEST offset, int *target_errno)
3412 {
3413 struct target_ops *t;
3414
3415 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3416 {
3417 if (t->to_fileio_pread != NULL)
3418 {
3419 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3420 target_errno);
3421
3422 if (targetdebug)
3423 fprintf_unfiltered (gdb_stdlog,
3424 "target_fileio_pread (%d,...,%d,%s) "
3425 "= %d (%d)\n",
3426 fd, len, pulongest (offset),
3427 ret, ret != -1 ? 0 : *target_errno);
3428 return ret;
3429 }
3430 }
3431
3432 *target_errno = FILEIO_ENOSYS;
3433 return -1;
3434 }
3435
3436 /* Close FD on the target. Return 0, or -1 if an error occurs
3437 (and set *TARGET_ERRNO). */
3438 int
3439 target_fileio_close (int fd, int *target_errno)
3440 {
3441 struct target_ops *t;
3442
3443 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3444 {
3445 if (t->to_fileio_close != NULL)
3446 {
3447 int ret = t->to_fileio_close (fd, target_errno);
3448
3449 if (targetdebug)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "target_fileio_close (%d) = %d (%d)\n",
3452 fd, ret, ret != -1 ? 0 : *target_errno);
3453 return ret;
3454 }
3455 }
3456
3457 *target_errno = FILEIO_ENOSYS;
3458 return -1;
3459 }
3460
3461 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3462 occurs (and set *TARGET_ERRNO). */
3463 int
3464 target_fileio_unlink (const char *filename, int *target_errno)
3465 {
3466 struct target_ops *t;
3467
3468 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3469 {
3470 if (t->to_fileio_unlink != NULL)
3471 {
3472 int ret = t->to_fileio_unlink (filename, target_errno);
3473
3474 if (targetdebug)
3475 fprintf_unfiltered (gdb_stdlog,
3476 "target_fileio_unlink (%s) = %d (%d)\n",
3477 filename, ret, ret != -1 ? 0 : *target_errno);
3478 return ret;
3479 }
3480 }
3481
3482 *target_errno = FILEIO_ENOSYS;
3483 return -1;
3484 }
3485
3486 /* Read value of symbolic link FILENAME on the target. Return a
3487 null-terminated string allocated via xmalloc, or NULL if an error
3488 occurs (and set *TARGET_ERRNO). */
3489 char *
3490 target_fileio_readlink (const char *filename, int *target_errno)
3491 {
3492 struct target_ops *t;
3493
3494 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3495 {
3496 if (t->to_fileio_readlink != NULL)
3497 {
3498 char *ret = t->to_fileio_readlink (filename, target_errno);
3499
3500 if (targetdebug)
3501 fprintf_unfiltered (gdb_stdlog,
3502 "target_fileio_readlink (%s) = %s (%d)\n",
3503 filename, ret? ret : "(nil)",
3504 ret? 0 : *target_errno);
3505 return ret;
3506 }
3507 }
3508
3509 *target_errno = FILEIO_ENOSYS;
3510 return NULL;
3511 }
3512
3513 static void
3514 target_fileio_close_cleanup (void *opaque)
3515 {
3516 int fd = *(int *) opaque;
3517 int target_errno;
3518
3519 target_fileio_close (fd, &target_errno);
3520 }
3521
3522 /* Read target file FILENAME. Store the result in *BUF_P and
3523 return the size of the transferred data. PADDING additional bytes are
3524 available in *BUF_P. This is a helper function for
3525 target_fileio_read_alloc; see the declaration of that function for more
3526 information. */
3527
3528 static LONGEST
3529 target_fileio_read_alloc_1 (const char *filename,
3530 gdb_byte **buf_p, int padding)
3531 {
3532 struct cleanup *close_cleanup;
3533 size_t buf_alloc, buf_pos;
3534 gdb_byte *buf;
3535 LONGEST n;
3536 int fd;
3537 int target_errno;
3538
3539 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3540 if (fd == -1)
3541 return -1;
3542
3543 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3544
3545 /* Start by reading up to 4K at a time. The target will throttle
3546 this number down if necessary. */
3547 buf_alloc = 4096;
3548 buf = xmalloc (buf_alloc);
3549 buf_pos = 0;
3550 while (1)
3551 {
3552 n = target_fileio_pread (fd, &buf[buf_pos],
3553 buf_alloc - buf_pos - padding, buf_pos,
3554 &target_errno);
3555 if (n < 0)
3556 {
3557 /* An error occurred. */
3558 do_cleanups (close_cleanup);
3559 xfree (buf);
3560 return -1;
3561 }
3562 else if (n == 0)
3563 {
3564 /* Read all there was. */
3565 do_cleanups (close_cleanup);
3566 if (buf_pos == 0)
3567 xfree (buf);
3568 else
3569 *buf_p = buf;
3570 return buf_pos;
3571 }
3572
3573 buf_pos += n;
3574
3575 /* If the buffer is filling up, expand it. */
3576 if (buf_alloc < buf_pos * 2)
3577 {
3578 buf_alloc *= 2;
3579 buf = xrealloc (buf, buf_alloc);
3580 }
3581
3582 QUIT;
3583 }
3584 }
3585
3586 /* Read target file FILENAME. Store the result in *BUF_P and return
3587    the size of the transferred data.  See the declaration of this
3588    function in "target.h" for more information about the return value.  */
3589
3590 LONGEST
3591 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3592 {
3593 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3594 }
3595
3596 /* Read target file FILENAME. The result is NUL-terminated and
3597 returned as a string, allocated using xmalloc. If an error occurs
3598 or the transfer is unsupported, NULL is returned. Empty objects
3599 are returned as allocated but empty strings. A warning is issued
3600 if the result contains any embedded NUL bytes. */
3601
3602 char *
3603 target_fileio_read_stralloc (const char *filename)
3604 {
3605 gdb_byte *buffer;
3606 char *bufstr;
3607 LONGEST i, transferred;
3608
3609 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3610 bufstr = (char *) buffer;
3611
3612 if (transferred < 0)
3613 return NULL;
3614
3615 if (transferred == 0)
3616 return xstrdup ("");
3617
3618 bufstr[transferred] = 0;
3619
3620 /* Check for embedded NUL bytes; but allow trailing NULs. */
3621 for (i = strlen (bufstr); i < transferred; i++)
3622 if (bufstr[i] != 0)
3623 {
3624 warning (_("target file %s "
3625 "contained unexpected null characters"),
3626 filename);
3627 break;
3628 }
3629
3630 return bufstr;
3631 }
3632
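/* Illustrative usage (the path is hypothetical):

     char *contents = target_fileio_read_stralloc ("/proc/net/dev");

     if (contents != NULL)
       make_cleanup (xfree, contents);  */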
3633
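/* Default implementation of to_region_ok_for_hw_watchpoint: accept a
   region only if it is no wider than a pointer on the current
   architecture.  */
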
3634 static int
3635 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3636 {
3637 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3638 }
3639
3640 static int
3641 default_watchpoint_addr_within_range (struct target_ops *target,
3642 CORE_ADDR addr,
3643 CORE_ADDR start, int length)
3644 {
3645 return addr >= start && addr < start + length;
3646 }
3647
3648 static struct gdbarch *
3649 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3650 {
3651 return target_gdbarch ();
3652 }
3653
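/* Trivial stub functions used to fill otherwise empty slots in target
   vectors; see for instance init_dummy_target below.  */
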
3654 static int
3655 return_zero (void)
3656 {
3657 return 0;
3658 }
3659
3660 static int
3661 return_one (void)
3662 {
3663 return 1;
3664 }
3665
3666 static int
3667 return_minus_one (void)
3668 {
3669 return -1;
3670 }
3671
3672 static void *
3673 return_null (void)
3674 {
3675 return 0;
3676 }
3677
3678 /* Find the next target down the stack from the specified target.  */
3681
3682 struct target_ops *
3683 find_target_beneath (struct target_ops *t)
3684 {
3685 return t->beneath;
3686 }
3687
3688 /* See target.h. */
3689
3690 struct target_ops *
3691 find_target_at (enum strata stratum)
3692 {
3693 struct target_ops *t;
3694
3695 for (t = current_target.beneath; t != NULL; t = t->beneath)
3696 if (t->to_stratum == stratum)
3697 return t;
3698
3699 return NULL;
3700 }
3701
3702 \f
3703 /* The inferior process has died. Long live the inferior! */
3704
3705 void
3706 generic_mourn_inferior (void)
3707 {
3708 ptid_t ptid;
3709
3710 ptid = inferior_ptid;
3711 inferior_ptid = null_ptid;
3712
3713 /* Mark breakpoints uninserted in case something tries to delete a
3714 breakpoint while we delete the inferior's threads (which would
3715 fail, since the inferior is long gone). */
3716 mark_breakpoints_out ();
3717
3718 if (!ptid_equal (ptid, null_ptid))
3719 {
3720 int pid = ptid_get_pid (ptid);
3721 exit_inferior (pid);
3722 }
3723
3724 /* Note this wipes step-resume breakpoints, so needs to be done
3725 after exit_inferior, which ends up referencing the step-resume
3726 breakpoints through clear_thread_inferior_resources. */
3727 breakpoint_init_inferior (inf_exited);
3728
3729 registers_changed ();
3730
3731 reopen_exec_file ();
3732 reinit_frame_cache ();
3733
3734 if (deprecated_detach_hook)
3735 deprecated_detach_hook ();
3736 }
3737 \f
3738 /* Convert a normal process ID to a string. Returns the string in a
3739 static buffer. */
3740
3741 char *
3742 normal_pid_to_str (ptid_t ptid)
3743 {
3744 static char buf[32];
3745
3746 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3747 return buf;
3748 }
3749
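/* For example (illustrative), normal_pid_to_str (pid_to_ptid (1234))
   yields "process 1234"; the result lives in a static buffer that is
   overwritten by the next call.  */
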
3750 static char *
3751 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3752 {
3753 return normal_pid_to_str (ptid);
3754 }
3755
3756 /* Error-catcher for target_find_memory_regions. */
3757 static int
3758 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3759 {
3760 error (_("Command not implemented for this target."));
3761 return 0;
3762 }
3763
3764 /* Error-catcher for target_make_corefile_notes. */
3765 static char *
3766 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3767 {
3768 error (_("Command not implemented for this target."));
3769 return NULL;
3770 }
3771
3772 /* Error-catcher for target_get_bookmark. */
3773 static gdb_byte *
3774 dummy_get_bookmark (char *ignore1, int ignore2)
3775 {
3776 tcomplain ();
3777 return NULL;
3778 }
3779
3780 /* Error-catcher for target_goto_bookmark. */
3781 static void
3782 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3783 {
3784 tcomplain ();
3785 }
3786
3787 /* Set up the handful of non-empty slots needed by the dummy target
3788 vector. */
3789
3790 static void
3791 init_dummy_target (void)
3792 {
3793 dummy_target.to_shortname = "None";
3794 dummy_target.to_longname = "None";
3795 dummy_target.to_doc = "";
3796 dummy_target.to_attach = find_default_attach;
3797 dummy_target.to_detach =
3798 (void (*)(struct target_ops *, const char *, int))target_ignore;
3799 dummy_target.to_create_inferior = find_default_create_inferior;
3800 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3801 dummy_target.to_supports_disable_randomization
3802 = find_default_supports_disable_randomization;
3803 dummy_target.to_pid_to_str = dummy_pid_to_str;
3804 dummy_target.to_stratum = dummy_stratum;
3805 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3806 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3807 dummy_target.to_get_bookmark = dummy_get_bookmark;
3808 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3809 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3810 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3811 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3812 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3813 dummy_target.to_has_execution
3814 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3815 dummy_target.to_magic = OPS_MAGIC;
3816
3817 install_dummy_methods (&dummy_target);
3818 }
3819 \f
3820 static void
3821 debug_to_open (char *args, int from_tty)
3822 {
3823 debug_target.to_open (args, from_tty);
3824
3825 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3826 }
3827
3828 void
3829 target_close (struct target_ops *targ)
3830 {
3831 gdb_assert (!target_is_pushed (targ));
3832
3833 if (targ->to_xclose != NULL)
3834 targ->to_xclose (targ);
3835 else if (targ->to_close != NULL)
3836 targ->to_close (targ);
3837
3838 if (targetdebug)
3839 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3840 }
3841
3842 void
3843 target_attach (char *args, int from_tty)
3844 {
3845 struct target_ops *t;
3846
3847 for (t = current_target.beneath; t != NULL; t = t->beneath)
3848 {
3849 if (t->to_attach != NULL)
3850 {
3851 t->to_attach (t, args, from_tty);
3852 if (targetdebug)
3853 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3854 args, from_tty);
3855 return;
3856 }
3857 }
3858
3859 internal_error (__FILE__, __LINE__,
3860 _("could not find a target to attach"));
3861 }
3862
3863 int
3864 target_thread_alive (ptid_t ptid)
3865 {
3866 struct target_ops *t;
3867
3868 for (t = current_target.beneath; t != NULL; t = t->beneath)
3869 {
3870 if (t->to_thread_alive != NULL)
3871 {
3872 int retval;
3873
3874 retval = t->to_thread_alive (t, ptid);
3875 if (targetdebug)
3876 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3877 ptid_get_pid (ptid), retval);
3878
3879 return retval;
3880 }
3881 }
3882
3883 return 0;
3884 }
3885
3886 void
3887 target_find_new_threads (void)
3888 {
3889 struct target_ops *t;
3890
3891 for (t = current_target.beneath; t != NULL; t = t->beneath)
3892 {
3893 if (t->to_find_new_threads != NULL)
3894 {
3895 t->to_find_new_threads (t);
3896 if (targetdebug)
3897 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3898
3899 return;
3900 }
3901 }
3902 }
3903
3904 void
3905 target_stop (ptid_t ptid)
3906 {
3907 if (!may_stop)
3908 {
3909 warning (_("May not interrupt or stop the target, ignoring attempt"));
3910 return;
3911 }
3912
3913 (*current_target.to_stop) (ptid);
3914 }
3915
3916 static void
3917 debug_to_post_attach (int pid)
3918 {
3919 debug_target.to_post_attach (pid);
3920
3921 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3922 }
3923
3924 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3925 result. The LIST incoming argument is released. */
3926
3927 static char *
3928 str_comma_list_concat_elem (char *list, const char *elem)
3929 {
3930 if (list == NULL)
3931 return xstrdup (elem);
3932 else
3933 return reconcat (list, list, ", ", elem, (char *) NULL);
3934 }
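/* Illustrative usage (values hypothetical): the list is built one
   element at a time; reconcat releases the previous LIST on each call,
   so only the final string needs to be freed.  */
#if 0
  char *list = NULL;

  list = str_comma_list_concat_elem (list, "TARGET_WNOHANG");
  list = str_comma_list_concat_elem (list, "unknown???");
  /* list now holds "TARGET_WNOHANG, unknown???".  */
#endif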
3935
3936 /* Helper for target_options_to_string. If OPT is present in
3937 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3938 Returns the new resulting string. OPT is removed from
3939 TARGET_OPTIONS. */
3940
3941 static char *
3942 do_option (int *target_options, char *ret,
3943 int opt, char *opt_str)
3944 {
3945 if ((*target_options & opt) != 0)
3946 {
3947 ret = str_comma_list_concat_elem (ret, opt_str);
3948 *target_options &= ~opt;
3949 }
3950
3951 return ret;
3952 }
3953
3954 char *
3955 target_options_to_string (int target_options)
3956 {
3957 char *ret = NULL;
3958
3959 #define DO_TARG_OPTION(OPT) \
3960 ret = do_option (&target_options, ret, OPT, #OPT)
3961
3962 DO_TARG_OPTION (TARGET_WNOHANG);
3963
3964 if (target_options != 0)
3965 ret = str_comma_list_concat_elem (ret, "unknown???");
3966
3967 if (ret == NULL)
3968 ret = xstrdup ("");
3969 return ret;
3970 }
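/* For example (illustrative), target_options_to_string (TARGET_WNOHANG)
   yields "TARGET_WNOHANG", and any unrecognized bit is reported as
   "unknown???".  The result is heap-allocated; callers release it with
   xfree.  */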
3971
3972 static void
3973 debug_print_register (const char * func,
3974 struct regcache *regcache, int regno)
3975 {
3976 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3977
3978 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3979 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3980 && gdbarch_register_name (gdbarch, regno) != NULL
3981 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3982 fprintf_unfiltered (gdb_stdlog, "(%s)",
3983 gdbarch_register_name (gdbarch, regno));
3984 else
3985 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3986 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3987 {
3988 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3989 int i, size = register_size (gdbarch, regno);
3990 gdb_byte buf[MAX_REGISTER_SIZE];
3991
3992 regcache_raw_collect (regcache, regno, buf);
3993 fprintf_unfiltered (gdb_stdlog, " = ");
3994 for (i = 0; i < size; i++)
3995 {
3996 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3997 }
3998 if (size <= sizeof (LONGEST))
3999 {
4000 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
4001
4002 fprintf_unfiltered (gdb_stdlog, " %s %s",
4003 core_addr_to_string_nz (val), plongest (val));
4004 }
4005 }
4006 fprintf_unfiltered (gdb_stdlog, "\n");
4007 }
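/* A fetch of a 32-bit little-endian register might be logged as
   (illustrative values):
     target_fetch_registers (eax) = 78563412 0x12345678 305419896
   i.e. the raw bytes in target byte order, followed by the value
   rendered as hex and as decimal.  */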
4008
4009 void
4010 target_fetch_registers (struct regcache *regcache, int regno)
4011 {
4012 struct target_ops *t;
4013
4014 for (t = current_target.beneath; t != NULL; t = t->beneath)
4015 {
4016 if (t->to_fetch_registers != NULL)
4017 {
4018 t->to_fetch_registers (t, regcache, regno);
4019 if (targetdebug)
4020 debug_print_register ("target_fetch_registers", regcache, regno);
4021 return;
4022 }
4023 }
4024 }
4025
4026 void
4027 target_store_registers (struct regcache *regcache, int regno)
4028 {
4029 struct target_ops *t;
4030
4031 if (!may_write_registers)
4032 error (_("Writing to registers is not allowed (regno %d)"), regno);
4033
4034 current_target.to_store_registers (&current_target, regcache, regno);
4035 if (targetdebug)
4036 {
4037 debug_print_register ("target_store_registers", regcache, regno);
4038 }
4039 }
4040
4041 int
4042 target_core_of_thread (ptid_t ptid)
4043 {
4044 struct target_ops *t;
4045
4046 for (t = current_target.beneath; t != NULL; t = t->beneath)
4047 {
4048 if (t->to_core_of_thread != NULL)
4049 {
4050 int retval = t->to_core_of_thread (t, ptid);
4051
4052 if (targetdebug)
4053 fprintf_unfiltered (gdb_stdlog,
4054 "target_core_of_thread (%d) = %d\n",
4055 ptid_get_pid (ptid), retval);
4056 return retval;
4057 }
4058 }
4059
4060 return -1;
4061 }
4062
4063 int
4064 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4065 {
4066 struct target_ops *t;
4067
4068 for (t = current_target.beneath; t != NULL; t = t->beneath)
4069 {
4070 if (t->to_verify_memory != NULL)
4071 {
4072 int retval = t->to_verify_memory (t, data, memaddr, size);
4073
4074 if (targetdebug)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "target_verify_memory (%s, %s) = %d\n",
4077 paddress (target_gdbarch (), memaddr),
4078 pulongest (size),
4079 retval);
4080 return retval;
4081 }
4082 }
4083
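  /* tcomplain throws an error and does not return, so no return
     statement is needed past this point.  */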
4084 tcomplain ();
4085 }
4086
4087 /* The documentation for this function is in its prototype declaration in
4088 target.h. */
4089
4090 int
4091 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4092 {
4093 struct target_ops *t;
4094
4095 for (t = current_target.beneath; t != NULL; t = t->beneath)
4096 if (t->to_insert_mask_watchpoint != NULL)
4097 {
4098 int ret;
4099
4100 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4101
4102 if (targetdebug)
4103 fprintf_unfiltered (gdb_stdlog, "\
4104 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4105 core_addr_to_string (addr),
4106 core_addr_to_string (mask), rw, ret);
4107
4108 return ret;
4109 }
4110
4111 return 1;
4112 }
4113
4114 /* The documentation for this function is in its prototype declaration in
4115 target.h. */
4116
4117 int
4118 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4119 {
4120 struct target_ops *t;
4121
4122 for (t = current_target.beneath; t != NULL; t = t->beneath)
4123 if (t->to_remove_mask_watchpoint != NULL)
4124 {
4125 int ret;
4126
4127 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4128
4129 if (targetdebug)
4130 fprintf_unfiltered (gdb_stdlog, "\
4131 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4132 core_addr_to_string (addr),
4133 core_addr_to_string (mask), rw, ret);
4134
4135 return ret;
4136 }
4137
4138 return 1;
4139 }
4140
4141 /* The documentation for this function is in its prototype declaration
4142 in target.h. */
4143
4144 int
4145 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4146 {
4147 struct target_ops *t;
4148
4149 for (t = current_target.beneath; t != NULL; t = t->beneath)
4150 if (t->to_masked_watch_num_registers != NULL)
4151 return t->to_masked_watch_num_registers (t, addr, mask);
4152
4153 return -1;
4154 }
4155
4156 /* The documentation for this function is in its prototype declaration
4157 in target.h. */
4158
4159 int
4160 target_ranged_break_num_registers (void)
4161 {
4162 struct target_ops *t;
4163
4164 for (t = current_target.beneath; t != NULL; t = t->beneath)
4165 if (t->to_ranged_break_num_registers != NULL)
4166 return t->to_ranged_break_num_registers (t);
4167
4168 return -1;
4169 }
4170
4171 /* See target.h. */
4172
4173 struct btrace_target_info *
4174 target_enable_btrace (ptid_t ptid)
4175 {
4176 struct target_ops *t;
4177
4178 for (t = current_target.beneath; t != NULL; t = t->beneath)
4179 if (t->to_enable_btrace != NULL)
4180 return t->to_enable_btrace (ptid);
4181
4182 tcomplain ();
4183 return NULL;
4184 }
4185
4186 /* See target.h. */
4187
4188 void
4189 target_disable_btrace (struct btrace_target_info *btinfo)
4190 {
4191 struct target_ops *t;
4192
4193 for (t = current_target.beneath; t != NULL; t = t->beneath)
4194 if (t->to_disable_btrace != NULL)
4195 {
4196 t->to_disable_btrace (btinfo);
4197 return;
4198 }
4199
4200 tcomplain ();
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_teardown_btrace (struct btrace_target_info *btinfo)
4207 {
4208 struct target_ops *t;
4209
4210 for (t = current_target.beneath; t != NULL; t = t->beneath)
4211 if (t->to_teardown_btrace != NULL)
4212 {
4213 t->to_teardown_btrace (btinfo);
4214 return;
4215 }
4216
4217 tcomplain ();
4218 }
4219
4220 /* See target.h. */
4221
4222 enum btrace_error
4223 target_read_btrace (VEC (btrace_block_s) **btrace,
4224 struct btrace_target_info *btinfo,
4225 enum btrace_read_type type)
4226 {
4227 struct target_ops *t;
4228
4229 for (t = current_target.beneath; t != NULL; t = t->beneath)
4230 if (t->to_read_btrace != NULL)
4231 return t->to_read_btrace (btrace, btinfo, type);
4232
4233 tcomplain ();
4234 return BTRACE_ERR_NOT_SUPPORTED;
4235 }
4236
4237 /* See target.h. */
4238
4239 void
4240 target_stop_recording (void)
4241 {
4242 struct target_ops *t;
4243
4244 for (t = current_target.beneath; t != NULL; t = t->beneath)
4245 if (t->to_stop_recording != NULL)
4246 {
4247 t->to_stop_recording ();
4248 return;
4249 }
4250
4251 /* This is optional. */
4252 }
4253
4254 /* See target.h. */
4255
4256 void
4257 target_info_record (void)
4258 {
4259 struct target_ops *t;
4260
4261 for (t = current_target.beneath; t != NULL; t = t->beneath)
4262 if (t->to_info_record != NULL)
4263 {
4264 t->to_info_record ();
4265 return;
4266 }
4267
4268 tcomplain ();
4269 }
4270
4271 /* See target.h. */
4272
4273 void
4274 target_save_record (const char *filename)
4275 {
4276 struct target_ops *t;
4277
4278 for (t = current_target.beneath; t != NULL; t = t->beneath)
4279 if (t->to_save_record != NULL)
4280 {
4281 t->to_save_record (filename);
4282 return;
4283 }
4284
4285 tcomplain ();
4286 }
4287
4288 /* See target.h. */
4289
4290 int
4291 target_supports_delete_record (void)
4292 {
4293 struct target_ops *t;
4294
4295 for (t = current_target.beneath; t != NULL; t = t->beneath)
4296 if (t->to_delete_record != NULL)
4297 return 1;
4298
4299 return 0;
4300 }
4301
4302 /* See target.h. */
4303
4304 void
4305 target_delete_record (void)
4306 {
4307 struct target_ops *t;
4308
4309 for (t = current_target.beneath; t != NULL; t = t->beneath)
4310 if (t->to_delete_record != NULL)
4311 {
4312 t->to_delete_record ();
4313 return;
4314 }
4315
4316 tcomplain ();
4317 }
4318
4319 /* See target.h. */
4320
4321 int
4322 target_record_is_replaying (void)
4323 {
4324 struct target_ops *t;
4325
4326 for (t = current_target.beneath; t != NULL; t = t->beneath)
4327 if (t->to_record_is_replaying != NULL)
4328 return t->to_record_is_replaying ();
4329
4330 return 0;
4331 }
4332
4333 /* See target.h. */
4334
4335 void
4336 target_goto_record_begin (void)
4337 {
4338 struct target_ops *t;
4339
4340 for (t = current_target.beneath; t != NULL; t = t->beneath)
4341 if (t->to_goto_record_begin != NULL)
4342 {
4343 t->to_goto_record_begin ();
4344 return;
4345 }
4346
4347 tcomplain ();
4348 }
4349
4350 /* See target.h. */
4351
4352 void
4353 target_goto_record_end (void)
4354 {
4355 struct target_ops *t;
4356
4357 for (t = current_target.beneath; t != NULL; t = t->beneath)
4358 if (t->to_goto_record_end != NULL)
4359 {
4360 t->to_goto_record_end ();
4361 return;
4362 }
4363
4364 tcomplain ();
4365 }
4366
4367 /* See target.h. */
4368
4369 void
4370 target_goto_record (ULONGEST insn)
4371 {
4372 struct target_ops *t;
4373
4374 for (t = current_target.beneath; t != NULL; t = t->beneath)
4375 if (t->to_goto_record != NULL)
4376 {
4377 t->to_goto_record (insn);
4378 return;
4379 }
4380
4381 tcomplain ();
4382 }
4383
4384 /* See target.h. */
4385
4386 void
4387 target_insn_history (int size, int flags)
4388 {
4389 struct target_ops *t;
4390
4391 for (t = current_target.beneath; t != NULL; t = t->beneath)
4392 if (t->to_insn_history != NULL)
4393 {
4394 t->to_insn_history (size, flags);
4395 return;
4396 }
4397
4398 tcomplain ();
4399 }
4400
4401 /* See target.h. */
4402
4403 void
4404 target_insn_history_from (ULONGEST from, int size, int flags)
4405 {
4406 struct target_ops *t;
4407
4408 for (t = current_target.beneath; t != NULL; t = t->beneath)
4409 if (t->to_insn_history_from != NULL)
4410 {
4411 t->to_insn_history_from (from, size, flags);
4412 return;
4413 }
4414
4415 tcomplain ();
4416 }
4417
4418 /* See target.h. */
4419
4420 void
4421 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4422 {
4423 struct target_ops *t;
4424
4425 for (t = current_target.beneath; t != NULL; t = t->beneath)
4426 if (t->to_insn_history_range != NULL)
4427 {
4428 t->to_insn_history_range (begin, end, flags);
4429 return;
4430 }
4431
4432 tcomplain ();
4433 }
4434
4435 /* See target.h. */
4436
4437 void
4438 target_call_history (int size, int flags)
4439 {
4440 struct target_ops *t;
4441
4442 for (t = current_target.beneath; t != NULL; t = t->beneath)
4443 if (t->to_call_history != NULL)
4444 {
4445 t->to_call_history (size, flags);
4446 return;
4447 }
4448
4449 tcomplain ();
4450 }
4451
4452 /* See target.h. */
4453
4454 void
4455 target_call_history_from (ULONGEST begin, int size, int flags)
4456 {
4457 struct target_ops *t;
4458
4459 for (t = current_target.beneath; t != NULL; t = t->beneath)
4460 if (t->to_call_history_from != NULL)
4461 {
4462 t->to_call_history_from (begin, size, flags);
4463 return;
4464 }
4465
4466 tcomplain ();
4467 }
4468
4469 /* See target.h. */
4470
4471 void
4472 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4473 {
4474 struct target_ops *t;
4475
4476 for (t = current_target.beneath; t != NULL; t = t->beneath)
4477 if (t->to_call_history_range != NULL)
4478 {
4479 t->to_call_history_range (begin, end, flags);
4480 return;
4481 }
4482
4483 tcomplain ();
4484 }
4485
4486 static void
4487 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4488 {
4489 debug_target.to_prepare_to_store (&debug_target, regcache);
4490
4491 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4492 }
4493
4494 /* See target.h. */
4495
4496 const struct frame_unwind *
4497 target_get_unwinder (void)
4498 {
4499 struct target_ops *t;
4500
4501 for (t = current_target.beneath; t != NULL; t = t->beneath)
4502 if (t->to_get_unwinder != NULL)
4503 return t->to_get_unwinder;
4504
4505 return NULL;
4506 }
4507
4508 /* See target.h. */
4509
4510 const struct frame_unwind *
4511 target_get_tailcall_unwinder (void)
4512 {
4513 struct target_ops *t;
4514
4515 for (t = current_target.beneath; t != NULL; t = t->beneath)
4516 if (t->to_get_tailcall_unwinder != NULL)
4517 return t->to_get_tailcall_unwinder;
4518
4519 return NULL;
4520 }
4521
4522 /* See target.h. */
4523
4524 CORE_ADDR
4525 forward_target_decr_pc_after_break (struct target_ops *ops,
4526 struct gdbarch *gdbarch)
4527 {
4528 for (; ops != NULL; ops = ops->beneath)
4529 if (ops->to_decr_pc_after_break != NULL)
4530 return ops->to_decr_pc_after_break (ops, gdbarch);
4531
4532 return gdbarch_decr_pc_after_break (gdbarch);
4533 }
4534
4535 /* See target.h. */
4536
4537 CORE_ADDR
4538 target_decr_pc_after_break (struct gdbarch *gdbarch)
4539 {
4540 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4541 }
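/* For example, on x86 the breakpoint instruction is the one-byte int3,
   so gdbarch_decr_pc_after_break normally returns 1 and the reported
   stop PC is wound back by one byte to the breakpoint's address
   (illustrative; the adjustment is architecture- and target-specific).  */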
4542
4543 static int
4544 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4545 int write, struct mem_attrib *attrib,
4546 struct target_ops *target)
4547 {
4548 int retval;
4549
4550 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4551 attrib, target);
4552
4553 fprintf_unfiltered (gdb_stdlog,
4554 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4555 paddress (target_gdbarch (), memaddr), len,
4556 write ? "write" : "read", retval);
4557
4558 if (retval > 0)
4559 {
4560 int i;
4561
4562 fputs_unfiltered (", bytes =", gdb_stdlog);
4563 for (i = 0; i < retval; i++)
4564 {
4565 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4566 {
4567 if (targetdebug < 2 && i > 0)
4568 {
4569 fprintf_unfiltered (gdb_stdlog, " ...");
4570 break;
4571 }
4572 fprintf_unfiltered (gdb_stdlog, "\n");
4573 }
4574
4575 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4576 }
4577 }
4578
4579 fputc_unfiltered ('\n', gdb_stdlog);
4580
4581 return retval;
4582 }
4583
4584 static void
4585 debug_to_files_info (struct target_ops *target)
4586 {
4587 debug_target.to_files_info (target);
4588
4589 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4590 }
4591
4592 static int
4593 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4594 struct bp_target_info *bp_tgt)
4595 {
4596 int retval;
4597
4598 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4599
4600 fprintf_unfiltered (gdb_stdlog,
4601 "target_insert_breakpoint (%s, xxx) = %ld\n",
4602 core_addr_to_string (bp_tgt->placed_address),
4603 (unsigned long) retval);
4604 return retval;
4605 }
4606
4607 static int
4608 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4609 struct bp_target_info *bp_tgt)
4610 {
4611 int retval;
4612
4613 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4614
4615 fprintf_unfiltered (gdb_stdlog,
4616 "target_remove_breakpoint (%s, xxx) = %ld\n",
4617 core_addr_to_string (bp_tgt->placed_address),
4618 (unsigned long) retval);
4619 return retval;
4620 }
4621
4622 static int
4623 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4624 {
4625 int retval;
4626
4627 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4628
4629 fprintf_unfiltered (gdb_stdlog,
4630 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4631 (unsigned long) type,
4632 (unsigned long) cnt,
4633 (unsigned long) from_tty,
4634 (unsigned long) retval);
4635 return retval;
4636 }
4637
4638 static int
4639 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4640 {
4641 CORE_ADDR retval;
4642
4643 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4644
4645 fprintf_unfiltered (gdb_stdlog,
4646 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4647 core_addr_to_string (addr), (unsigned long) len,
4648 core_addr_to_string (retval));
4649 return retval;
4650 }
4651
4652 static int
4653 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4654 struct expression *cond)
4655 {
4656 int retval;
4657
4658 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4659 rw, cond);
4660
4661 fprintf_unfiltered (gdb_stdlog,
4662 "target_can_accel_watchpoint_condition "
4663 "(%s, %d, %d, %s) = %ld\n",
4664 core_addr_to_string (addr), len, rw,
4665 host_address_to_string (cond), (unsigned long) retval);
4666 return retval;
4667 }
4668
4669 static int
4670 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4671 {
4672 int retval;
4673
4674 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4675
4676 fprintf_unfiltered (gdb_stdlog,
4677 "target_stopped_by_watchpoint () = %ld\n",
4678 (unsigned long) retval);
4679 return retval;
4680 }
4681
4682 static int
4683 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4684 {
4685 int retval;
4686
4687 retval = debug_target.to_stopped_data_address (target, addr);
4688
4689 fprintf_unfiltered (gdb_stdlog,
4690 "target_stopped_data_address ([%s]) = %ld\n",
4691 core_addr_to_string (*addr),
4692 (unsigned long)retval);
4693 return retval;
4694 }
4695
4696 static int
4697 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4698 CORE_ADDR addr,
4699 CORE_ADDR start, int length)
4700 {
4701 int retval;
4702
4703 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4704 start, length);
4705
4706 fprintf_unfiltered (gdb_stdlog,
4707 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4708 core_addr_to_string (addr), core_addr_to_string (start),
4709 length, retval);
4710 return retval;
4711 }
4712
4713 static int
4714 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4715 struct bp_target_info *bp_tgt)
4716 {
4717 int retval;
4718
4719 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4720
4721 fprintf_unfiltered (gdb_stdlog,
4722 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4723 core_addr_to_string (bp_tgt->placed_address),
4724 (unsigned long) retval);
4725 return retval;
4726 }
4727
4728 static int
4729 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4730 struct bp_target_info *bp_tgt)
4731 {
4732 int retval;
4733
4734 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4735
4736 fprintf_unfiltered (gdb_stdlog,
4737 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4738 core_addr_to_string (bp_tgt->placed_address),
4739 (unsigned long) retval);
4740 return retval;
4741 }
4742
4743 static int
4744 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4745 struct expression *cond)
4746 {
4747 int retval;
4748
4749 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4750
4751 fprintf_unfiltered (gdb_stdlog,
4752 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4753 core_addr_to_string (addr), len, type,
4754 host_address_to_string (cond), (unsigned long) retval);
4755 return retval;
4756 }
4757
4758 static int
4759 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4760 struct expression *cond)
4761 {
4762 int retval;
4763
4764 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4765
4766 fprintf_unfiltered (gdb_stdlog,
4767 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4768 core_addr_to_string (addr), len, type,
4769 host_address_to_string (cond), (unsigned long) retval);
4770 return retval;
4771 }
4772
4773 static void
4774 debug_to_terminal_init (void)
4775 {
4776 debug_target.to_terminal_init ();
4777
4778 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4779 }
4780
4781 static void
4782 debug_to_terminal_inferior (void)
4783 {
4784 debug_target.to_terminal_inferior ();
4785
4786 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4787 }
4788
4789 static void
4790 debug_to_terminal_ours_for_output (void)
4791 {
4792 debug_target.to_terminal_ours_for_output ();
4793
4794 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4795 }
4796
4797 static void
4798 debug_to_terminal_ours (void)
4799 {
4800 debug_target.to_terminal_ours ();
4801
4802 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4803 }
4804
4805 static void
4806 debug_to_terminal_save_ours (void)
4807 {
4808 debug_target.to_terminal_save_ours ();
4809
4810 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4811 }
4812
4813 static void
4814 debug_to_terminal_info (const char *arg, int from_tty)
4815 {
4816 debug_target.to_terminal_info (arg, from_tty);
4817
4818 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4819 from_tty);
4820 }
4821
4822 static void
4823 debug_to_load (char *args, int from_tty)
4824 {
4825 debug_target.to_load (args, from_tty);
4826
4827 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4828 }
4829
4830 static void
4831 debug_to_post_startup_inferior (ptid_t ptid)
4832 {
4833 debug_target.to_post_startup_inferior (ptid);
4834
4835 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4836 ptid_get_pid (ptid));
4837 }
4838
4839 static int
4840 debug_to_insert_fork_catchpoint (int pid)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_insert_fork_catchpoint (pid);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4847 pid, retval);
4848
4849 return retval;
4850 }
4851
4852 static int
4853 debug_to_remove_fork_catchpoint (int pid)
4854 {
4855 int retval;
4856
4857 retval = debug_target.to_remove_fork_catchpoint (pid);
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4860 pid, retval);
4861
4862 return retval;
4863 }
4864
4865 static int
4866 debug_to_insert_vfork_catchpoint (int pid)
4867 {
4868 int retval;
4869
4870 retval = debug_target.to_insert_vfork_catchpoint (pid);
4871
4872 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4873 pid, retval);
4874
4875 return retval;
4876 }
4877
4878 static int
4879 debug_to_remove_vfork_catchpoint (int pid)
4880 {
4881 int retval;
4882
4883 retval = debug_target.to_remove_vfork_catchpoint (pid);
4884
4885 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4886 pid, retval);
4887
4888 return retval;
4889 }
4890
4891 static int
4892 debug_to_insert_exec_catchpoint (int pid)
4893 {
4894 int retval;
4895
4896 retval = debug_target.to_insert_exec_catchpoint (pid);
4897
4898 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4899 pid, retval);
4900
4901 return retval;
4902 }
4903
4904 static int
4905 debug_to_remove_exec_catchpoint (int pid)
4906 {
4907 int retval;
4908
4909 retval = debug_target.to_remove_exec_catchpoint (pid);
4910
4911 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4912 pid, retval);
4913
4914 return retval;
4915 }
4916
4917 static int
4918 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4919 {
4920 int has_exited;
4921
4922 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4923
4924 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4925 pid, wait_status, *exit_status, has_exited);
4926
4927 return has_exited;
4928 }
4929
4930 static int
4931 debug_to_can_run (void)
4932 {
4933 int retval;
4934
4935 retval = debug_target.to_can_run ();
4936
4937 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4938
4939 return retval;
4940 }
4941
4942 static struct gdbarch *
4943 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4944 {
4945 struct gdbarch *retval;
4946
4947 retval = debug_target.to_thread_architecture (ops, ptid);
4948
4949 fprintf_unfiltered (gdb_stdlog,
4950 "target_thread_architecture (%s) = %s [%s]\n",
4951 target_pid_to_str (ptid),
4952 host_address_to_string (retval),
4953 gdbarch_bfd_arch_info (retval)->printable_name);
4954 return retval;
4955 }
4956
4957 static void
4958 debug_to_stop (ptid_t ptid)
4959 {
4960 debug_target.to_stop (ptid);
4961
4962 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4963 target_pid_to_str (ptid));
4964 }
4965
4966 static void
4967 debug_to_rcmd (char *command,
4968 struct ui_file *outbuf)
4969 {
4970 debug_target.to_rcmd (command, outbuf);
4971 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4972 }
4973
4974 static char *
4975 debug_to_pid_to_exec_file (int pid)
4976 {
4977 char *exec_file;
4978
4979 exec_file = debug_target.to_pid_to_exec_file (pid);
4980
4981 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4982 pid, exec_file);
4983
4984 return exec_file;
4985 }
4986
4987 static void
4988 setup_target_debug (void)
4989 {
4990 memcpy (&debug_target, &current_target, sizeof debug_target);
4991
4992 current_target.to_open = debug_to_open;
4993 current_target.to_post_attach = debug_to_post_attach;
4994 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4995 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4996 current_target.to_files_info = debug_to_files_info;
4997 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4998 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4999 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
5000 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
5001 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
5002 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
5003 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
5004 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
5005 current_target.to_stopped_data_address = debug_to_stopped_data_address;
5006 current_target.to_watchpoint_addr_within_range
5007 = debug_to_watchpoint_addr_within_range;
5008 current_target.to_region_ok_for_hw_watchpoint
5009 = debug_to_region_ok_for_hw_watchpoint;
5010 current_target.to_can_accel_watchpoint_condition
5011 = debug_to_can_accel_watchpoint_condition;
5012 current_target.to_terminal_init = debug_to_terminal_init;
5013 current_target.to_terminal_inferior = debug_to_terminal_inferior;
5014 current_target.to_terminal_ours_for_output
5015 = debug_to_terminal_ours_for_output;
5016 current_target.to_terminal_ours = debug_to_terminal_ours;
5017 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
5018 current_target.to_terminal_info = debug_to_terminal_info;
5019 current_target.to_load = debug_to_load;
5020 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
5021 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
5022 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
5023 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
5024 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
5025 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5026 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5027 current_target.to_has_exited = debug_to_has_exited;
5028 current_target.to_can_run = debug_to_can_run;
5029 current_target.to_stop = debug_to_stop;
5030 current_target.to_rcmd = debug_to_rcmd;
5031 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5032 current_target.to_thread_architecture = debug_to_thread_architecture;
5033 }
5034 \f
5035
5036 static char targ_desc[] =
5037 "Names of targets and files being debugged.\nShows the entire \
5038 stack of targets currently in use (including the exec-file,\n\
5039 core-file, and process, if any), as well as the symbol file name.";
5040
5041 static void
5042 do_monitor_command (char *cmd,
5043 int from_tty)
5044 {
5045 if ((current_target.to_rcmd
5046 == (void (*) (char *, struct ui_file *)) tcomplain)
5047 || (current_target.to_rcmd == debug_to_rcmd
5048 && (debug_target.to_rcmd
5049 == (void (*) (char *, struct ui_file *)) tcomplain)))
5050 error (_("\"monitor\" command not supported by this target."));
5051 target_rcmd (cmd, gdb_stdtarg);
5052 }
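/* For example (illustrative), "monitor reset" forwards the string
   "reset" to the target's to_rcmd method; whether the remote stub
   understands it is entirely up to the stub.  */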
5053
5054 /* Print the name of each layer of our target stack. */
5055
5056 static void
5057 maintenance_print_target_stack (char *cmd, int from_tty)
5058 {
5059 struct target_ops *t;
5060
5061 printf_filtered (_("The current target stack is:\n"));
5062
5063 for (t = target_stack; t != NULL; t = t->beneath)
5064 {
5065 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5066 }
5067 }
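/* Typical output (illustrative; the entries depend on what is
   currently pushed):
     The current target stack is:
       - exec (Local exec file)
       - None (None)
   The stack is printed from the topmost stratum downward.  */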
5068
5069 /* Controls if async mode is permitted. */
5070 int target_async_permitted = 0;
5071
5072 /* The set command writes to this variable. If the inferior is
5073 executing, target_async_permitted is *not* updated. */
5074 static int target_async_permitted_1 = 0;
5075
5076 static void
5077 set_target_async_command (char *args, int from_tty,
5078 struct cmd_list_element *c)
5079 {
5080 if (have_live_inferiors ())
5081 {
5082 target_async_permitted_1 = target_async_permitted;
5083 error (_("Cannot change this setting while the inferior is running."));
5084 }
5085
5086 target_async_permitted = target_async_permitted_1;
5087 }
5088
5089 static void
5090 show_target_async_command (struct ui_file *file, int from_tty,
5091 struct cmd_list_element *c,
5092 const char *value)
5093 {
5094 fprintf_filtered (file,
5095 _("Controlling the inferior in "
5096 "asynchronous mode is %s.\n"), value);
5097 }
5098
5099 /* Temporary copies of permission settings. */
5100
5101 static int may_write_registers_1 = 1;
5102 static int may_write_memory_1 = 1;
5103 static int may_insert_breakpoints_1 = 1;
5104 static int may_insert_tracepoints_1 = 1;
5105 static int may_insert_fast_tracepoints_1 = 1;
5106 static int may_stop_1 = 1;
5107
5108 /* Make the user-set values match the real values again. */
5109
5110 void
5111 update_target_permissions (void)
5112 {
5113 may_write_registers_1 = may_write_registers;
5114 may_write_memory_1 = may_write_memory;
5115 may_insert_breakpoints_1 = may_insert_breakpoints;
5116 may_insert_tracepoints_1 = may_insert_tracepoints;
5117 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5118 may_stop_1 = may_stop;
5119 }
5120
5121 /* This one function handles (most of) the permission flags in the same
5122 way. */
5123
5124 static void
5125 set_target_permissions (char *args, int from_tty,
5126 struct cmd_list_element *c)
5127 {
5128 if (target_has_execution)
5129 {
5130 update_target_permissions ();
5131 error (_("Cannot change this setting while the inferior is running."));
5132 }
5133
5134 /* Make the real values match the user-changed values. */
5135 may_write_registers = may_write_registers_1;
5136 may_insert_breakpoints = may_insert_breakpoints_1;
5137 may_insert_tracepoints = may_insert_tracepoints_1;
5138 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5139 may_stop = may_stop_1;
5140 update_observer_mode ();
5141 }
5142
5143 /* Set memory write permission independently of observer mode. */
5144
5145 static void
5146 set_write_memory_permission (char *args, int from_tty,
5147 struct cmd_list_element *c)
5148 {
5149 /* Make the real values match the user-changed values. */
5150 may_write_memory = may_write_memory_1;
5151 update_observer_mode ();
5152 }
5153
5154
5155 void
5156 initialize_targets (void)
5157 {
5158 init_dummy_target ();
5159 push_target (&dummy_target);
5160
5161 add_info ("target", target_info, targ_desc);
5162 add_info ("files", target_info, targ_desc);
5163
5164 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5165 Set target debugging."), _("\
5166 Show target debugging."), _("\
5167 When non-zero, target debugging is enabled. Higher numbers are more\n\
5168 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5169 command."),
5170 NULL,
5171 show_targetdebug,
5172 &setdebuglist, &showdebuglist);
5173
5174 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5175 &trust_readonly, _("\
5176 Set mode for reading from readonly sections."), _("\
5177 Show mode for reading from readonly sections."), _("\
5178 When this mode is on, memory reads from readonly sections (such as .text)\n\
5179 will be read from the object file instead of from the target. This will\n\
5180 result in significant performance improvement for remote targets."),
5181 NULL,
5182 show_trust_readonly,
5183 &setlist, &showlist);
5184
5185 add_com ("monitor", class_obscure, do_monitor_command,
5186 _("Send a command to the remote monitor (remote targets only)."));
5187
5188 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5189 _("Print the name of each layer of the internal target stack."),
5190 &maintenanceprintlist);
5191
5192 add_setshow_boolean_cmd ("target-async", no_class,
5193 &target_async_permitted_1, _("\
5194 Set whether gdb controls the inferior in asynchronous mode."), _("\
5195 Show whether gdb controls the inferior in asynchronous mode."), _("\
5196 Tells gdb whether to control the inferior in asynchronous mode."),
5197 set_target_async_command,
5198 show_target_async_command,
5199 &setlist,
5200 &showlist);
5201
5202 add_setshow_boolean_cmd ("may-write-registers", class_support,
5203 &may_write_registers_1, _("\
5204 Set permission to write into registers."), _("\
5205 Show permission to write into registers."), _("\
5206 When this permission is on, GDB may write into the target's registers.\n\
5207 Otherwise, any sort of write attempt will result in an error."),
5208 set_target_permissions, NULL,
5209 &setlist, &showlist);
5210
5211 add_setshow_boolean_cmd ("may-write-memory", class_support,
5212 &may_write_memory_1, _("\
5213 Set permission to write into target memory."), _("\
5214 Show permission to write into target memory."), _("\
5215 When this permission is on, GDB may write into the target's memory.\n\
5216 Otherwise, any sort of write attempt will result in an error."),
5217 set_write_memory_permission, NULL,
5218 &setlist, &showlist);
5219
5220 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5221 &may_insert_breakpoints_1, _("\
5222 Set permission to insert breakpoints in the target."), _("\
5223 Show permission to insert breakpoints in the target."), _("\
5224 When this permission is on, GDB may insert breakpoints in the program.\n\
5225 Otherwise, any sort of insertion attempt will result in an error."),
5226 set_target_permissions, NULL,
5227 &setlist, &showlist);
5228
5229 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5230 &may_insert_tracepoints_1, _("\
5231 Set permission to insert tracepoints in the target."), _("\
5232 Show permission to insert tracepoints in the target."), _("\
5233 When this permission is on, GDB may insert tracepoints in the program.\n\
5234 Otherwise, any sort of insertion attempt will result in an error."),
5235 set_target_permissions, NULL,
5236 &setlist, &showlist);
5237
5238 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5239 &may_insert_fast_tracepoints_1, _("\
5240 Set permission to insert fast tracepoints in the target."), _("\
5241 Show permission to insert fast tracepoints in the target."), _("\
5242 When this permission is on, GDB may insert fast tracepoints.\n\
5243 Otherwise, any sort of insertion attempt will result in an error."),
5244 set_target_permissions, NULL,
5245 &setlist, &showlist);
5246
5247 add_setshow_boolean_cmd ("may-interrupt", class_support,
5248 &may_stop_1, _("\
5249 Set permission to interrupt or signal the target."), _("\
5250 Show permission to interrupt or signal the target."), _("\
5251 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5252 Otherwise, any attempt to interrupt or stop will be ignored."),
5253 set_target_permissions, NULL,
5254 &setlist, &showlist);
5255 }