convert to_remove_exec_catchpoint: gdb/target.c (deliverable/binutils-gdb.git)
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_one (void);
67
68 static int return_minus_one (void);
69
70 static void *return_null (void);
71
72 void target_ignore (void);
73
74 static void target_command (char *, int);
75
76 static struct target_ops *find_default_run_target (char *);
77
78 static target_xfer_partial_ftype default_xfer_partial;
79
80 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
81 ptid_t ptid);
82
83 static int find_default_can_async_p (struct target_ops *ignore);
84
85 static int find_default_is_async_p (struct target_ops *ignore);
86
87 #include "target-delegates.c"
88
89 static void init_dummy_target (void);
90
91 static struct target_ops debug_target;
92
93 static void debug_to_open (char *, int);
94
95 static void debug_to_prepare_to_store (struct target_ops *self,
96 struct regcache *);
97
98 static void debug_to_files_info (struct target_ops *);
99
100 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
107 int, int, int);
108
109 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
110 struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
114 struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (struct target_ops *self,
118 CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (struct target_ops *self,
122 CORE_ADDR, int, int,
123 struct expression *);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
131 CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
134 CORE_ADDR, int, int,
135 struct expression *);
136
137 static void debug_to_terminal_init (struct target_ops *self);
138
139 static void debug_to_terminal_inferior (struct target_ops *self);
140
141 static void debug_to_terminal_ours_for_output (struct target_ops *self);
142
143 static void debug_to_terminal_save_ours (struct target_ops *self);
144
145 static void debug_to_terminal_ours (struct target_ops *self);
146
147 static void debug_to_load (struct target_ops *self, char *, int);
148
149 static int debug_to_can_run (struct target_ops *self);
150
151 static void debug_to_stop (struct target_ops *self, ptid_t);
152
153 /* Pointer to the array of target architecture structures; the number
154 of entries currently in the array; and the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_allocsize;
159 #define DEFAULT_ALLOCSIZE 10
160
161 /* The initial current target, so that there is always a semi-valid
162 current target. */
163
164 static struct target_ops dummy_target;
165
166 /* Top of target stack. */
167
168 static struct target_ops *target_stack;
169
170 /* The target structure we are currently using to talk to a process
171 or file or whatever "inferior" we have. */
172
173 struct target_ops current_target;
174
175 /* Command list for target. */
176
177 static struct cmd_list_element *targetlist = NULL;
178
179 /* Nonzero if we should trust readonly sections from the
180 executable when reading memory. */
181
182 static int trust_readonly = 0;
183
184 /* Nonzero if we should show true memory content including
185 memory breakpoints inserted by GDB. */
186
187 static int show_memory_breakpoints = 0;
188
189 /* These globals control whether GDB attempts to perform these
190 operations; they are useful for targets that need to prevent
191 inadvertent disruption, such as in non-stop mode. */
192
193 int may_write_registers = 1;
194
195 int may_write_memory = 1;
196
197 int may_insert_breakpoints = 1;
198
199 int may_insert_tracepoints = 1;
200
201 int may_insert_fast_tracepoints = 1;
202
203 int may_stop = 1;
204
205 /* Non-zero if we want to see a trace of target-level operations. */
206
207 static unsigned int targetdebug = 0;
208 static void
209 show_targetdebug (struct ui_file *file, int from_tty,
210 struct cmd_list_element *c, const char *value)
211 {
212 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
213 }
214
215 static void setup_target_debug (void);
216
217 /* The user just typed 'target' without the name of a target. */
218
219 static void
220 target_command (char *arg, int from_tty)
221 {
222 fputs_filtered ("Argument required (target name). Try `help target'\n",
223 gdb_stdout);
224 }
225
226 /* Default target_has_* methods for process_stratum targets. */
227
228 int
229 default_child_has_all_memory (struct target_ops *ops)
230 {
231 /* If no inferior selected, then we can't read memory here. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_memory (struct target_ops *ops)
240 {
241 /* If no inferior selected, then we can't read memory here. */
242 if (ptid_equal (inferior_ptid, null_ptid))
243 return 0;
244
245 return 1;
246 }
247
248 int
249 default_child_has_stack (struct target_ops *ops)
250 {
251 /* If no inferior selected, there's no stack. */
252 if (ptid_equal (inferior_ptid, null_ptid))
253 return 0;
254
255 return 1;
256 }
257
258 int
259 default_child_has_registers (struct target_ops *ops)
260 {
261 /* Can't read registers from no inferior. */
262 if (ptid_equal (inferior_ptid, null_ptid))
263 return 0;
264
265 return 1;
266 }
267
268 int
269 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
270 {
271 /* If there's no thread selected, then we can't make it run through
272 hoops. */
273 if (ptid_equal (the_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279
280 int
281 target_has_all_memory_1 (void)
282 {
283 struct target_ops *t;
284
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_all_memory (t))
287 return 1;
288
289 return 0;
290 }
291
292 int
293 target_has_memory_1 (void)
294 {
295 struct target_ops *t;
296
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_memory (t))
299 return 1;
300
301 return 0;
302 }
303
304 int
305 target_has_stack_1 (void)
306 {
307 struct target_ops *t;
308
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_stack (t))
311 return 1;
312
313 return 0;
314 }
315
316 int
317 target_has_registers_1 (void)
318 {
319 struct target_ops *t;
320
321 for (t = current_target.beneath; t != NULL; t = t->beneath)
322 if (t->to_has_registers (t))
323 return 1;
324
325 return 0;
326 }
327
328 int
329 target_has_execution_1 (ptid_t the_ptid)
330 {
331 struct target_ops *t;
332
333 for (t = current_target.beneath; t != NULL; t = t->beneath)
334 if (t->to_has_execution (t, the_ptid))
335 return 1;
336
337 return 0;
338 }
339
340 int
341 target_has_execution_current (void)
342 {
343 return target_has_execution_1 (inferior_ptid);
344 }
345
346 /* Complete initialization of T. This ensures that various fields in
347 T are set, if needed by the target implementation. */
348
349 void
350 complete_target_initialization (struct target_ops *t)
351 {
352 /* Provide default values for all "must have" methods. */
353 if (t->to_xfer_partial == NULL)
354 t->to_xfer_partial = default_xfer_partial;
355
356 if (t->to_has_all_memory == NULL)
357 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
358
359 if (t->to_has_memory == NULL)
360 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
361
362 if (t->to_has_stack == NULL)
363 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
364
365 if (t->to_has_registers == NULL)
366 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
367
368 if (t->to_has_execution == NULL)
369 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
370
371 install_delegators (t);
372 }
373
374 /* Add possible target architecture T to the list and add a new
375 command 'target T->to_shortname'. Set COMPLETER as the command's
376 completer if not NULL. */
377
378 void
379 add_target_with_completer (struct target_ops *t,
380 completer_ftype *completer)
381 {
382 struct cmd_list_element *c;
383
384 complete_target_initialization (t);
385
386 if (!target_structs)
387 {
388 target_struct_allocsize = DEFAULT_ALLOCSIZE;
389 target_structs = (struct target_ops **) xmalloc
390 (target_struct_allocsize * sizeof (*target_structs));
391 }
392 if (target_struct_size >= target_struct_allocsize)
393 {
394 target_struct_allocsize *= 2;
395 target_structs = (struct target_ops **)
396 xrealloc ((char *) target_structs,
397 target_struct_allocsize * sizeof (*target_structs));
398 }
399 target_structs[target_struct_size++] = t;
400
401 if (targetlist == NULL)
402 add_prefix_cmd ("target", class_run, target_command, _("\
403 Connect to a target machine or process.\n\
404 The first argument is the type or protocol of the target machine.\n\
405 Remaining arguments are interpreted by the target protocol. For more\n\
406 information on the arguments for a particular protocol, type\n\
407 `help target ' followed by the protocol name."),
408 &targetlist, "target ", 0, &cmdlist);
409 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
410 &targetlist);
411 if (completer != NULL)
412 set_cmd_completer (c, completer);
413 }
414
415 /* Add a possible target architecture to the list. */
416
417 void
418 add_target (struct target_ops *t)
419 {
420 add_target_with_completer (t, NULL);
421 }
422
423 /* See target.h. */
424
425 void
426 add_deprecated_target_alias (struct target_ops *t, char *alias)
427 {
428 struct cmd_list_element *c;
429 char *alt;
430
431 /* If we used add_alias_cmd here, we would not get the deprecated warning;
432 see PR cli/15104. */
433 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
434 alt = xstrprintf ("target %s", t->to_shortname);
435 deprecate_cmd (c, alt);
436 }
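/* Illustrative sketch: a minimal target_ops definition registered through
   add_target, following the pattern described above.  The names
   "sketch_ops", "sketch_open" and "init_sketch_ops" are invented for
   illustration only and do not exist in GDB; the block is guarded by
   #if 0 so it is never compiled.  */
#if 0
static struct target_ops sketch_ops;

static void
sketch_open (char *args, int from_tty)
{
  /* A real target would establish its connection here before pushing
     itself onto the target stack.  */
  push_target (&sketch_ops);
}

static void
init_sketch_ops (void)
{
  sketch_ops.to_shortname = "sketch";
  sketch_ops.to_longname = "Minimal example target";
  sketch_ops.to_doc = "Example target used only for illustration.";
  sketch_ops.to_open = sketch_open;
  sketch_ops.to_stratum = process_stratum;
  sketch_ops.to_magic = OPS_MAGIC;

  /* Registers the "target sketch" command; complete_target_initialization
     fills in the remaining "must have" methods with defaults.  */
  add_target (&sketch_ops);
}
#endif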
437
438 /* Stub functions */
439
440 void
441 target_ignore (void)
442 {
443 }
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
463 void
464 target_load (char *arg, int from_tty)
465 {
466 target_dcache_invalidate ();
467 (*current_target.to_load) (&current_target, arg, from_tty);
468 }
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
493 void
494 target_terminal_inferior (void)
495 {
496 /* A background resume (``run&'') should leave GDB in control of the
497 terminal. Use target_can_async_p, not target_is_async_p, since at
498 this point the target is not async yet. However, if sync_execution
499 is not set, we know it will become async prior to resume. */
500 if (target_can_async_p () && !sync_execution)
501 return;
502
503 /* If GDB is resuming the inferior in the foreground, install
504 inferior's terminal modes. */
505 (*current_target.to_terminal_inferior) (&current_target);
506 }
507
508 static int
509 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
510 struct target_ops *t)
511 {
512 errno = EIO; /* Can't read/write this location. */
513 return 0; /* No bytes handled. */
514 }
515
516 static void
517 tcomplain (void)
518 {
519 error (_("You can't do that when your target is `%s'"),
520 current_target.to_shortname);
521 }
522
523 void
524 noprocess (void)
525 {
526 error (_("You can't do that without a process to debug."));
527 }
528
529 static void
530 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
531 {
532 printf_unfiltered (_("No saved terminal information.\n"));
533 }
534
535 /* A default implementation for the to_get_ada_task_ptid target method.
536
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
539 inferior_ptid. */
540
541 static ptid_t
542 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
543 {
544 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
545 }
546
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops *self)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
559 /* Go through the target stack from top to bottom, copying over zero
560 entries in current_target, then filling in still empty entries. In
561 effect, we are doing class inheritance through the pushed target
562 vectors.
563
564 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
565 is currently implemented, is that it discards any knowledge of
566 which target an inherited method originally belonged to.
567 Consequently, new target methods should instead explicitly and
568 locally search the target stack for the target that can handle the
569 request. */
570
571 static void
572 update_current_target (void)
573 {
574 struct target_ops *t;
575
576 /* First, reset current's contents. */
577 memset (&current_target, 0, sizeof (current_target));
578
579 /* Install the delegators. */
580 install_delegators (&current_target);
581
582 #define INHERIT(FIELD, TARGET) \
583 if (!current_target.FIELD) \
584 current_target.FIELD = (TARGET)->FIELD
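/* For illustration: INHERIT (to_shortname, t) expands to

     if (!current_target.to_shortname)
       current_target.to_shortname = (t)->to_shortname;

   so, walking the stack from top to bottom, the first target that
   provides a non-zero value for a field wins, which is the "class
   inheritance" described in the comment above.  */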
585
586 for (t = target_stack; t; t = t->beneath)
587 {
588 INHERIT (to_shortname, t);
589 INHERIT (to_longname, t);
590 INHERIT (to_doc, t);
591 /* Do not inherit to_open. */
592 /* Do not inherit to_close. */
593 /* Do not inherit to_attach. */
594 /* Do not inherit to_post_attach. */
595 INHERIT (to_attach_no_wait, t);
596 /* Do not inherit to_detach. */
597 /* Do not inherit to_disconnect. */
598 /* Do not inherit to_resume. */
599 /* Do not inherit to_wait. */
600 /* Do not inherit to_fetch_registers. */
601 /* Do not inherit to_store_registers. */
602 /* Do not inherit to_prepare_to_store. */
603 INHERIT (deprecated_xfer_memory, t);
604 /* Do not inherit to_files_info. */
605 /* Do not inherit to_insert_breakpoint. */
606 /* Do not inherit to_remove_breakpoint. */
607 /* Do not inherit to_can_use_hw_breakpoint. */
608 /* Do not inherit to_insert_hw_breakpoint. */
609 /* Do not inherit to_remove_hw_breakpoint. */
610 /* Do not inherit to_ranged_break_num_registers. */
611 /* Do not inherit to_insert_watchpoint. */
612 /* Do not inherit to_remove_watchpoint. */
613 /* Do not inherit to_insert_mask_watchpoint. */
614 /* Do not inherit to_remove_mask_watchpoint. */
615 /* Do not inherit to_stopped_data_address. */
616 INHERIT (to_have_steppable_watchpoint, t);
617 INHERIT (to_have_continuable_watchpoint, t);
618 /* Do not inherit to_stopped_by_watchpoint. */
619 /* Do not inherit to_watchpoint_addr_within_range. */
620 /* Do not inherit to_region_ok_for_hw_watchpoint. */
621 /* Do not inherit to_can_accel_watchpoint_condition. */
622 /* Do not inherit to_masked_watch_num_registers. */
623 /* Do not inherit to_terminal_init. */
624 /* Do not inherit to_terminal_inferior. */
625 /* Do not inherit to_terminal_ours_for_output. */
626 /* Do not inherit to_terminal_ours. */
627 /* Do not inherit to_terminal_save_ours. */
628 /* Do not inherit to_terminal_info. */
629 /* Do not inherit to_kill. */
630 /* Do not inherit to_load. */
631 /* Do not inherit to_create_inferior. */
632 /* Do not inherit to_post_startup_inferior. */
633 /* Do not inherit to_insert_fork_catchpoint. */
634 /* Do not inherit to_remove_fork_catchpoint. */
635 /* Do not inherit to_insert_vfork_catchpoint. */
636 /* Do not inherit to_remove_vfork_catchpoint. */
637 /* Do not inherit to_follow_fork. */
638 /* Do not inherit to_insert_exec_catchpoint. */
639 /* Do not inherit to_remove_exec_catchpoint. */
640 INHERIT (to_set_syscall_catchpoint, t);
641 INHERIT (to_has_exited, t);
642 /* Do not inherit to_mourn_inferior. */
643 INHERIT (to_can_run, t);
644 /* Do not inherit to_pass_signals. */
645 /* Do not inherit to_program_signals. */
646 /* Do not inherit to_thread_alive. */
647 /* Do not inherit to_find_new_threads. */
648 /* Do not inherit to_pid_to_str. */
649 INHERIT (to_extra_thread_info, t);
650 INHERIT (to_thread_name, t);
651 INHERIT (to_stop, t);
652 /* Do not inherit to_xfer_partial. */
653 /* Do not inherit to_rcmd. */
654 INHERIT (to_pid_to_exec_file, t);
655 INHERIT (to_log_command, t);
656 INHERIT (to_stratum, t);
657 /* Do not inherit to_has_all_memory. */
658 /* Do not inherit to_has_memory. */
659 /* Do not inherit to_has_stack. */
660 /* Do not inherit to_has_registers. */
661 /* Do not inherit to_has_execution. */
662 INHERIT (to_has_thread_control, t);
663 /* Do not inherit to_can_async_p. */
664 /* Do not inherit to_is_async_p. */
665 /* Do not inherit to_async. */
666 INHERIT (to_find_memory_regions, t);
667 INHERIT (to_make_corefile_notes, t);
668 INHERIT (to_get_bookmark, t);
669 INHERIT (to_goto_bookmark, t);
670 /* Do not inherit to_get_thread_local_address. */
671 INHERIT (to_can_execute_reverse, t);
672 INHERIT (to_execution_direction, t);
673 INHERIT (to_thread_architecture, t);
674 /* Do not inherit to_read_description. */
675 INHERIT (to_get_ada_task_ptid, t);
676 /* Do not inherit to_search_memory. */
677 INHERIT (to_supports_multi_process, t);
678 INHERIT (to_supports_enable_disable_tracepoint, t);
679 INHERIT (to_supports_string_tracing, t);
680 INHERIT (to_trace_init, t);
681 INHERIT (to_download_tracepoint, t);
682 INHERIT (to_can_download_tracepoint, t);
683 INHERIT (to_download_trace_state_variable, t);
684 INHERIT (to_enable_tracepoint, t);
685 INHERIT (to_disable_tracepoint, t);
686 INHERIT (to_trace_set_readonly_regions, t);
687 INHERIT (to_trace_start, t);
688 INHERIT (to_get_trace_status, t);
689 INHERIT (to_get_tracepoint_status, t);
690 INHERIT (to_trace_stop, t);
691 INHERIT (to_trace_find, t);
692 INHERIT (to_get_trace_state_variable_value, t);
693 INHERIT (to_save_trace_data, t);
694 INHERIT (to_upload_tracepoints, t);
695 INHERIT (to_upload_trace_state_variables, t);
696 INHERIT (to_get_raw_trace_data, t);
697 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
698 INHERIT (to_set_disconnected_tracing, t);
699 INHERIT (to_set_circular_trace_buffer, t);
700 INHERIT (to_set_trace_buffer_size, t);
701 INHERIT (to_set_trace_notes, t);
702 INHERIT (to_get_tib_address, t);
703 INHERIT (to_set_permissions, t);
704 INHERIT (to_static_tracepoint_marker_at, t);
705 INHERIT (to_static_tracepoint_markers_by_strid, t);
706 INHERIT (to_traceframe_info, t);
707 INHERIT (to_use_agent, t);
708 INHERIT (to_can_use_agent, t);
709 INHERIT (to_augmented_libraries_svr4_read, t);
710 INHERIT (to_magic, t);
711 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
712 INHERIT (to_can_run_breakpoint_commands, t);
713 /* Do not inherit to_memory_map. */
714 /* Do not inherit to_flash_erase. */
715 /* Do not inherit to_flash_done. */
716 }
717 #undef INHERIT
718
719 /* Clean up a target struct so it no longer has any zero pointers in
720 it. Some entries are defaulted to a method that prints an error;
721 others are hard-wired to a standard recursive default. */
722
723 #define de_fault(field, value) \
724 if (!current_target.field) \
725 current_target.field = value
726
727 de_fault (to_open,
728 (void (*) (char *, int))
729 tcomplain);
730 de_fault (to_close,
731 (void (*) (struct target_ops *))
732 target_ignore);
733 de_fault (deprecated_xfer_memory,
734 (int (*) (CORE_ADDR, gdb_byte *, int, int,
735 struct mem_attrib *, struct target_ops *))
736 nomemory);
737 de_fault (to_set_syscall_catchpoint,
738 (int (*) (struct target_ops *, int, int, int, int, int *))
739 return_one);
740 de_fault (to_has_exited,
741 (int (*) (struct target_ops *, int, int, int *))
742 return_zero);
743 de_fault (to_can_run,
744 (int (*) (struct target_ops *))
745 return_zero);
746 de_fault (to_extra_thread_info,
747 (char *(*) (struct target_ops *, struct thread_info *))
748 return_null);
749 de_fault (to_thread_name,
750 (char *(*) (struct target_ops *, struct thread_info *))
751 return_null);
752 de_fault (to_stop,
753 (void (*) (struct target_ops *, ptid_t))
754 target_ignore);
755 de_fault (to_pid_to_exec_file,
756 (char *(*) (struct target_ops *, int))
757 return_null);
758 de_fault (to_thread_architecture,
759 default_thread_architecture);
760 current_target.to_read_description = NULL;
761 de_fault (to_get_ada_task_ptid,
762 (ptid_t (*) (struct target_ops *, long, long))
763 default_get_ada_task_ptid);
764 de_fault (to_supports_multi_process,
765 (int (*) (struct target_ops *))
766 return_zero);
767 de_fault (to_supports_enable_disable_tracepoint,
768 (int (*) (struct target_ops *))
769 return_zero);
770 de_fault (to_supports_string_tracing,
771 (int (*) (struct target_ops *))
772 return_zero);
773 de_fault (to_trace_init,
774 (void (*) (struct target_ops *))
775 tcomplain);
776 de_fault (to_download_tracepoint,
777 (void (*) (struct target_ops *, struct bp_location *))
778 tcomplain);
779 de_fault (to_can_download_tracepoint,
780 (int (*) (struct target_ops *))
781 return_zero);
782 de_fault (to_download_trace_state_variable,
783 (void (*) (struct target_ops *, struct trace_state_variable *))
784 tcomplain);
785 de_fault (to_enable_tracepoint,
786 (void (*) (struct target_ops *, struct bp_location *))
787 tcomplain);
788 de_fault (to_disable_tracepoint,
789 (void (*) (struct target_ops *, struct bp_location *))
790 tcomplain);
791 de_fault (to_trace_set_readonly_regions,
792 (void (*) (struct target_ops *))
793 tcomplain);
794 de_fault (to_trace_start,
795 (void (*) (struct target_ops *))
796 tcomplain);
797 de_fault (to_get_trace_status,
798 (int (*) (struct target_ops *, struct trace_status *))
799 return_minus_one);
800 de_fault (to_get_tracepoint_status,
801 (void (*) (struct target_ops *, struct breakpoint *,
802 struct uploaded_tp *))
803 tcomplain);
804 de_fault (to_trace_stop,
805 (void (*) (struct target_ops *))
806 tcomplain);
807 de_fault (to_trace_find,
808 (int (*) (struct target_ops *,
809 enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
810 return_minus_one);
811 de_fault (to_get_trace_state_variable_value,
812 (int (*) (struct target_ops *, int, LONGEST *))
813 return_zero);
814 de_fault (to_save_trace_data,
815 (int (*) (struct target_ops *, const char *))
816 tcomplain);
817 de_fault (to_upload_tracepoints,
818 (int (*) (struct target_ops *, struct uploaded_tp **))
819 return_zero);
820 de_fault (to_upload_trace_state_variables,
821 (int (*) (struct target_ops *, struct uploaded_tsv **))
822 return_zero);
823 de_fault (to_get_raw_trace_data,
824 (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
825 tcomplain);
826 de_fault (to_get_min_fast_tracepoint_insn_len,
827 (int (*) (struct target_ops *))
828 return_minus_one);
829 de_fault (to_set_disconnected_tracing,
830 (void (*) (struct target_ops *, int))
831 target_ignore);
832 de_fault (to_set_circular_trace_buffer,
833 (void (*) (struct target_ops *, int))
834 target_ignore);
835 de_fault (to_set_trace_buffer_size,
836 (void (*) (struct target_ops *, LONGEST))
837 target_ignore);
838 de_fault (to_set_trace_notes,
839 (int (*) (struct target_ops *,
840 const char *, const char *, const char *))
841 return_zero);
842 de_fault (to_get_tib_address,
843 (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
844 tcomplain);
845 de_fault (to_set_permissions,
846 (void (*) (struct target_ops *))
847 target_ignore);
848 de_fault (to_static_tracepoint_marker_at,
849 (int (*) (struct target_ops *,
850 CORE_ADDR, struct static_tracepoint_marker *))
851 return_zero);
852 de_fault (to_static_tracepoint_markers_by_strid,
853 (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
854 const char *))
855 tcomplain);
856 de_fault (to_traceframe_info,
857 (struct traceframe_info * (*) (struct target_ops *))
858 return_null);
859 de_fault (to_supports_evaluation_of_breakpoint_conditions,
860 (int (*) (struct target_ops *))
861 return_zero);
862 de_fault (to_can_run_breakpoint_commands,
863 (int (*) (struct target_ops *))
864 return_zero);
865 de_fault (to_use_agent,
866 (int (*) (struct target_ops *, int))
867 tcomplain);
868 de_fault (to_can_use_agent,
869 (int (*) (struct target_ops *))
870 return_zero);
871 de_fault (to_augmented_libraries_svr4_read,
872 (int (*) (struct target_ops *))
873 return_zero);
874 de_fault (to_execution_direction, default_execution_direction);
875
876 #undef de_fault
877
878 /* Finally, position the target-stack beneath the squashed
879 "current_target". That way code looking for a non-inherited
880 target method can quickly and simply find it. */
881 current_target.beneath = target_stack;
882
883 if (targetdebug)
884 setup_target_debug ();
885 }
886
887 /* Push a new target type into the stack of the existing target accessors,
888 possibly superseding some of the existing accessors.
889
890 Rather than allow an empty stack, we always have the dummy target at
891 the bottom stratum, so we can call the function vectors without
892 checking them. */
893
894 void
895 push_target (struct target_ops *t)
896 {
897 struct target_ops **cur;
898
899 /* Check magic number. If wrong, it probably means someone changed
900 the struct definition, but not all the places that initialize one. */
901 if (t->to_magic != OPS_MAGIC)
902 {
903 fprintf_unfiltered (gdb_stderr,
904 "Magic number of %s target struct wrong\n",
905 t->to_shortname);
906 internal_error (__FILE__, __LINE__,
907 _("failed internal consistency check"));
908 }
909
910 /* Find the proper stratum to install this target in. */
911 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
912 {
913 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
914 break;
915 }
916
917 /* If there are already targets at this stratum, remove them. */
918 /* FIXME: cagney/2003-10-15: I think this should be popping all
919 targets to CUR, and not just those at this stratum level. */
920 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
921 {
922 /* There's already something at this stratum level. Close it,
923 and un-hook it from the stack. */
924 struct target_ops *tmp = (*cur);
925
926 (*cur) = (*cur)->beneath;
927 tmp->beneath = NULL;
928 target_close (tmp);
929 }
930
931 /* We have removed all targets in our stratum, now add the new one. */
932 t->beneath = (*cur);
933 (*cur) = t;
934
935 update_current_target ();
936 }
937
938 /* Remove a target_ops vector from the stack, wherever it may be.
939 Return how many times it was removed (0 or 1). */
940
941 int
942 unpush_target (struct target_ops *t)
943 {
944 struct target_ops **cur;
945 struct target_ops *tmp;
946
947 if (t->to_stratum == dummy_stratum)
948 internal_error (__FILE__, __LINE__,
949 _("Attempt to unpush the dummy target"));
950
951 /* Look for the specified target. Note that we assume that a target
952 can only occur once in the target stack. */
953
954 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
955 {
956 if ((*cur) == t)
957 break;
958 }
959
960 /* If we don't find target_ops, quit. Only open targets should be
961 closed. */
962 if ((*cur) == NULL)
963 return 0;
964
965 /* Unchain the target. */
966 tmp = (*cur);
967 (*cur) = (*cur)->beneath;
968 tmp->beneath = NULL;
969
970 update_current_target ();
971
972 /* Finally close the target. Note we do this after unchaining, so
973 any target method calls from within the target_close
974 implementation don't end up in T anymore. */
975 target_close (t);
976
977 return 1;
978 }
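/* Usage sketch for the two functions above (SKETCH_OPS is hypothetical):
   a target typically pushes itself from its to_open routine,
     push_target (&sketch_ops);
   which makes it the current target for its stratum, and it is removed
   when the connection goes away,
     unpush_target (&sketch_ops);
   which unhooks it from the stack first and only then calls target_close,
   so methods invoked during close no longer reach the dying target.  */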
979
980 void
981 pop_all_targets_above (enum strata above_stratum)
982 {
983 while ((int) (current_target.to_stratum) > (int) above_stratum)
984 {
985 if (!unpush_target (target_stack))
986 {
987 fprintf_unfiltered (gdb_stderr,
988 "pop_all_targets couldn't find target %s\n",
989 target_stack->to_shortname);
990 internal_error (__FILE__, __LINE__,
991 _("failed internal consistency check"));
992 break;
993 }
994 }
995 }
996
997 void
998 pop_all_targets (void)
999 {
1000 pop_all_targets_above (dummy_stratum);
1001 }
1002
1003 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1004
1005 int
1006 target_is_pushed (struct target_ops *t)
1007 {
1008 struct target_ops **cur;
1009
1010 /* Check magic number. If wrong, it probably means someone changed
1011 the struct definition, but not all the places that initialize one. */
1012 if (t->to_magic != OPS_MAGIC)
1013 {
1014 fprintf_unfiltered (gdb_stderr,
1015 "Magic number of %s target struct wrong\n",
1016 t->to_shortname);
1017 internal_error (__FILE__, __LINE__,
1018 _("failed internal consistency check"));
1019 }
1020
1021 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1022 if (*cur == t)
1023 return 1;
1024
1025 return 0;
1026 }
1027
1028 /* Using the objfile specified in OBJFILE, find the address for the
1029 current thread's thread-local storage with offset OFFSET. */
1030 CORE_ADDR
1031 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1032 {
1033 volatile CORE_ADDR addr = 0;
1034 struct target_ops *target;
1035
1036 for (target = current_target.beneath;
1037 target != NULL;
1038 target = target->beneath)
1039 {
1040 if (target->to_get_thread_local_address != NULL)
1041 break;
1042 }
1043
1044 if (target != NULL
1045 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1046 {
1047 ptid_t ptid = inferior_ptid;
1048 volatile struct gdb_exception ex;
1049
1050 TRY_CATCH (ex, RETURN_MASK_ALL)
1051 {
1052 CORE_ADDR lm_addr;
1053
1054 /* Fetch the load module address for this objfile. */
1055 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1056 objfile);
1057 /* If it's 0, throw the appropriate exception. */
1058 if (lm_addr == 0)
1059 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1060 _("TLS load module not found"));
1061
1062 addr = target->to_get_thread_local_address (target, ptid,
1063 lm_addr, offset);
1064 }
1065 /* If an error occurred, print TLS related messages here. Otherwise,
1066 throw the error to some higher catcher. */
1067 if (ex.reason < 0)
1068 {
1069 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1070
1071 switch (ex.error)
1072 {
1073 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1074 error (_("Cannot find thread-local variables "
1075 "in this thread library."));
1076 break;
1077 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1078 if (objfile_is_library)
1079 error (_("Cannot find shared library `%s' in dynamic"
1080 " linker's load module list"), objfile_name (objfile));
1081 else
1082 error (_("Cannot find executable file `%s' in dynamic"
1083 " linker's load module list"), objfile_name (objfile));
1084 break;
1085 case TLS_NOT_ALLOCATED_YET_ERROR:
1086 if (objfile_is_library)
1087 error (_("The inferior has not yet allocated storage for"
1088 " thread-local variables in\n"
1089 "the shared library `%s'\n"
1090 "for %s"),
1091 objfile_name (objfile), target_pid_to_str (ptid));
1092 else
1093 error (_("The inferior has not yet allocated storage for"
1094 " thread-local variables in\n"
1095 "the executable `%s'\n"
1096 "for %s"),
1097 objfile_name (objfile), target_pid_to_str (ptid));
1098 break;
1099 case TLS_GENERIC_ERROR:
1100 if (objfile_is_library)
1101 error (_("Cannot find thread-local storage for %s, "
1102 "shared library %s:\n%s"),
1103 target_pid_to_str (ptid),
1104 objfile_name (objfile), ex.message);
1105 else
1106 error (_("Cannot find thread-local storage for %s, "
1107 "executable file %s:\n%s"),
1108 target_pid_to_str (ptid),
1109 objfile_name (objfile), ex.message);
1110 break;
1111 default:
1112 throw_exception (ex);
1113 break;
1114 }
1115 }
1116 }
1117 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1118 TLS is an ABI-specific thing. But we don't do that yet. */
1119 else
1120 error (_("Cannot find thread-local variables on this target"));
1121
1122 return addr;
1123 }
1124
1125 const char *
1126 target_xfer_status_to_string (enum target_xfer_status err)
1127 {
1128 #define CASE(X) case X: return #X
1129 switch (err)
1130 {
1131 CASE(TARGET_XFER_E_IO);
1132 CASE(TARGET_XFER_E_UNAVAILABLE);
1133 default:
1134 return "<unknown>";
1135 }
1136 #undef CASE
1137 };
1138
1139
1140 #undef MIN
1141 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1142
1143 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1144 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
1145 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1146 is responsible for freeing it. Return the number of bytes successfully
1147 read. */
1148
1149 int
1150 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1151 {
1152 int tlen, offset, i;
1153 gdb_byte buf[4];
1154 int errcode = 0;
1155 char *buffer;
1156 int buffer_allocated;
1157 char *bufptr;
1158 unsigned int nbytes_read = 0;
1159
1160 gdb_assert (string);
1161
1162 /* Small for testing. */
1163 buffer_allocated = 4;
1164 buffer = xmalloc (buffer_allocated);
1165 bufptr = buffer;
1166
1167 while (len > 0)
1168 {
1169 tlen = MIN (len, 4 - (memaddr & 3));
1170 offset = memaddr & 3;
1171
1172 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1173 if (errcode != 0)
1174 {
1175 /* The transfer request might have crossed the boundary to an
1176 unallocated region of memory. Retry the transfer, requesting
1177 a single byte. */
1178 tlen = 1;
1179 offset = 0;
1180 errcode = target_read_memory (memaddr, buf, 1);
1181 if (errcode != 0)
1182 goto done;
1183 }
1184
1185 if (bufptr - buffer + tlen > buffer_allocated)
1186 {
1187 unsigned int bytes;
1188
1189 bytes = bufptr - buffer;
1190 buffer_allocated *= 2;
1191 buffer = xrealloc (buffer, buffer_allocated);
1192 bufptr = buffer + bytes;
1193 }
1194
1195 for (i = 0; i < tlen; i++)
1196 {
1197 *bufptr++ = buf[i + offset];
1198 if (buf[i + offset] == '\000')
1199 {
1200 nbytes_read += i + 1;
1201 goto done;
1202 }
1203 }
1204
1205 memaddr += tlen;
1206 len -= tlen;
1207 nbytes_read += tlen;
1208 }
1209 done:
1210 *string = buffer;
1211 if (errnop != NULL)
1212 *errnop = errcode;
1213 return nbytes_read;
1214 }
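/* Illustrative sketch of calling target_read_string.  The function name
   sketch_print_inferior_string and the ADDR parameter are invented for
   the example; the block is guarded by #if 0 and never compiled.  */
#if 0
static void
sketch_print_inferior_string (CORE_ADDR addr)
{
  char *str;
  int err;
  int nread = target_read_string (addr, &str, 200, &err);

  if (err == 0 && nread > 0)
    printf_filtered ("%s\n", str);

  /* The buffer is malloc'd even on error, and the caller owns it.  */
  xfree (str);
}
#endif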
1215
1216 struct target_section_table *
1217 target_get_section_table (struct target_ops *target)
1218 {
1219 struct target_ops *t;
1220
1221 if (targetdebug)
1222 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1223
1224 for (t = target; t != NULL; t = t->beneath)
1225 if (t->to_get_section_table != NULL)
1226 return (*t->to_get_section_table) (t);
1227
1228 return NULL;
1229 }
1230
1231 /* Find a section containing ADDR. */
1232
1233 struct target_section *
1234 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1235 {
1236 struct target_section_table *table = target_get_section_table (target);
1237 struct target_section *secp;
1238
1239 if (table == NULL)
1240 return NULL;
1241
1242 for (secp = table->sections; secp < table->sections_end; secp++)
1243 {
1244 if (addr >= secp->addr && addr < secp->endaddr)
1245 return secp;
1246 }
1247 return NULL;
1248 }
1249
1250 /* Read memory from the live target, even if currently inspecting a
1251 traceframe. The return is the same as that of target_read. */
1252
1253 static enum target_xfer_status
1254 target_read_live_memory (enum target_object object,
1255 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1256 ULONGEST *xfered_len)
1257 {
1258 enum target_xfer_status ret;
1259 struct cleanup *cleanup;
1260
1261 /* Switch momentarily out of tfind mode so as to access live memory.
1262 Note that this must not clear global state, such as the frame
1263 cache, which must still remain valid for the previous traceframe.
1264 We may be _building_ the frame cache at this point. */
1265 cleanup = make_cleanup_restore_traceframe_number ();
1266 set_traceframe_number (-1);
1267
1268 ret = target_xfer_partial (current_target.beneath, object, NULL,
1269 myaddr, NULL, memaddr, len, xfered_len);
1270
1271 do_cleanups (cleanup);
1272 return ret;
1273 }
1274
1275 /* Using the set of read-only target sections of OPS, read live
1276 read-only memory. Note that the actual reads start from the
1277 top-most target again.
1278
1279 For interface/parameters/return description see target.h,
1280 to_xfer_partial. */
1281
1282 static enum target_xfer_status
1283 memory_xfer_live_readonly_partial (struct target_ops *ops,
1284 enum target_object object,
1285 gdb_byte *readbuf, ULONGEST memaddr,
1286 ULONGEST len, ULONGEST *xfered_len)
1287 {
1288 struct target_section *secp;
1289 struct target_section_table *table;
1290
1291 secp = target_section_by_addr (ops, memaddr);
1292 if (secp != NULL
1293 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1294 secp->the_bfd_section)
1295 & SEC_READONLY))
1296 {
1297 struct target_section *p;
1298 ULONGEST memend = memaddr + len;
1299
1300 table = target_get_section_table (ops);
1301
1302 for (p = table->sections; p < table->sections_end; p++)
1303 {
1304 if (memaddr >= p->addr)
1305 {
1306 if (memend <= p->endaddr)
1307 {
1308 /* Entire transfer is within this section. */
1309 return target_read_live_memory (object, memaddr,
1310 readbuf, len, xfered_len);
1311 }
1312 else if (memaddr >= p->endaddr)
1313 {
1314 /* This section ends before the transfer starts. */
1315 continue;
1316 }
1317 else
1318 {
1319 /* This section overlaps the transfer. Just do half. */
1320 len = p->endaddr - memaddr;
1321 return target_read_live_memory (object, memaddr,
1322 readbuf, len, xfered_len);
1323 }
1324 }
1325 }
1326 }
1327
1328 return TARGET_XFER_EOF;
1329 }
1330
1331 /* Read memory from more than one valid target. A core file, for
1332 instance, could have some of the memory but delegate other bits to
1333 the target below it. So, we must manually try all targets. */
1334
1335 static enum target_xfer_status
1336 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1337 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1338 ULONGEST *xfered_len)
1339 {
1340 enum target_xfer_status res;
1341
1342 do
1343 {
1344 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1345 readbuf, writebuf, memaddr, len,
1346 xfered_len);
1347 if (res == TARGET_XFER_OK)
1348 break;
1349
1350 /* Stop if the target reports that the memory is not available. */
1351 if (res == TARGET_XFER_E_UNAVAILABLE)
1352 break;
1353
1354 /* We want to continue past core files to executables, but not
1355 past a running target's memory. */
1356 if (ops->to_has_all_memory (ops))
1357 break;
1358
1359 ops = ops->beneath;
1360 }
1361 while (ops != NULL);
1362
1363 return res;
1364 }
1365
1366 /* Perform a partial memory transfer.
1367 For docs see target.h, to_xfer_partial. */
1368
1369 static enum target_xfer_status
1370 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1371 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1372 ULONGEST len, ULONGEST *xfered_len)
1373 {
1374 enum target_xfer_status res;
1375 int reg_len;
1376 struct mem_region *region;
1377 struct inferior *inf;
1378
1379 /* For accesses to unmapped overlay sections, read directly from
1380 files. Must do this first, as MEMADDR may need adjustment. */
1381 if (readbuf != NULL && overlay_debugging)
1382 {
1383 struct obj_section *section = find_pc_overlay (memaddr);
1384
1385 if (pc_in_unmapped_range (memaddr, section))
1386 {
1387 struct target_section_table *table
1388 = target_get_section_table (ops);
1389 const char *section_name = section->the_bfd_section->name;
1390
1391 memaddr = overlay_mapped_address (memaddr, section);
1392 return section_table_xfer_memory_partial (readbuf, writebuf,
1393 memaddr, len, xfered_len,
1394 table->sections,
1395 table->sections_end,
1396 section_name);
1397 }
1398 }
1399
1400 /* Try the executable files, if "trust-readonly-sections" is set. */
1401 if (readbuf != NULL && trust_readonly)
1402 {
1403 struct target_section *secp;
1404 struct target_section_table *table;
1405
1406 secp = target_section_by_addr (ops, memaddr);
1407 if (secp != NULL
1408 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1409 secp->the_bfd_section)
1410 & SEC_READONLY))
1411 {
1412 table = target_get_section_table (ops);
1413 return section_table_xfer_memory_partial (readbuf, writebuf,
1414 memaddr, len, xfered_len,
1415 table->sections,
1416 table->sections_end,
1417 NULL);
1418 }
1419 }
1420
1421 /* If reading unavailable memory in the context of traceframes, and
1422 this address falls within a read-only section, fall back to
1423 reading from live memory. */
1424 if (readbuf != NULL && get_traceframe_number () != -1)
1425 {
1426 VEC(mem_range_s) *available;
1427
1428 /* If we fail to get the set of available memory, then the
1429 target does not support querying traceframe info, and so we
1430 attempt reading from the traceframe anyway (assuming the
1431 target implements the old QTro packet then). */
1432 if (traceframe_available_memory (&available, memaddr, len))
1433 {
1434 struct cleanup *old_chain;
1435
1436 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1437
1438 if (VEC_empty (mem_range_s, available)
1439 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1440 {
1441 /* Don't read into the traceframe's available
1442 memory. */
1443 if (!VEC_empty (mem_range_s, available))
1444 {
1445 LONGEST oldlen = len;
1446
1447 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1448 gdb_assert (len <= oldlen);
1449 }
1450
1451 do_cleanups (old_chain);
1452
1453 /* This goes through the topmost target again. */
1454 res = memory_xfer_live_readonly_partial (ops, object,
1455 readbuf, memaddr,
1456 len, xfered_len);
1457 if (res == TARGET_XFER_OK)
1458 return TARGET_XFER_OK;
1459 else
1460 {
1461 /* No use trying further, we know some memory starting
1462 at MEMADDR isn't available. */
1463 *xfered_len = len;
1464 return TARGET_XFER_E_UNAVAILABLE;
1465 }
1466 }
1467
1468 /* Don't try to read more than how much is available, in
1469 case the target implements the deprecated QTro packet to
1470 cater for older GDBs (the target's knowledge of read-only
1471 sections may be outdated by now). */
1472 len = VEC_index (mem_range_s, available, 0)->length;
1473
1474 do_cleanups (old_chain);
1475 }
1476 }
1477
1478 /* Try GDB's internal data cache. */
1479 region = lookup_mem_region (memaddr);
1480 /* region->hi == 0 means there's no upper bound. */
1481 if (memaddr + len < region->hi || region->hi == 0)
1482 reg_len = len;
1483 else
1484 reg_len = region->hi - memaddr;
1485
1486 switch (region->attrib.mode)
1487 {
1488 case MEM_RO:
1489 if (writebuf != NULL)
1490 return TARGET_XFER_E_IO;
1491 break;
1492
1493 case MEM_WO:
1494 if (readbuf != NULL)
1495 return TARGET_XFER_E_IO;
1496 break;
1497
1498 case MEM_FLASH:
1499 /* We only support writing to flash during "load" for now. */
1500 if (writebuf != NULL)
1501 error (_("Writing to flash memory forbidden in this context"));
1502 break;
1503
1504 case MEM_NONE:
1505 return TARGET_XFER_E_IO;
1506 }
1507
1508 if (!ptid_equal (inferior_ptid, null_ptid))
1509 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1510 else
1511 inf = NULL;
1512
1513 if (inf != NULL
1514 /* The dcache reads whole cache lines; that doesn't play well
1515 with reading from a trace buffer, because reading outside of
1516 the collected memory range fails. */
1517 && get_traceframe_number () == -1
1518 && (region->attrib.cache
1519 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1520 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1521 {
1522 DCACHE *dcache = target_dcache_get_or_init ();
1523 int l;
1524
1525 if (readbuf != NULL)
1526 l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1527 else
1528 /* FIXME drow/2006-08-09: If we're going to preserve const
1529 correctness dcache_xfer_memory should take readbuf and
1530 writebuf. */
1531 l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1532 reg_len, 1);
1533 if (l <= 0)
1534 return TARGET_XFER_E_IO;
1535 else
1536 {
1537 *xfered_len = (ULONGEST) l;
1538 return TARGET_XFER_OK;
1539 }
1540 }
1541
1542 /* If none of those methods found the memory we wanted, fall back
1543 to a target partial transfer. Normally a single call to
1544 to_xfer_partial is enough; if it doesn't recognize an object
1545 it will call the to_xfer_partial of the next target down.
1546 But for memory this won't do. Memory is the only target
1547 object which can be read from more than one valid target.
1548 A core file, for instance, could have some of the memory but
1549 delegate other bits to the target below it. So, we must
1550 manually try all targets. */
1551
1552 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1553 xfered_len);
1554
1555 /* If we are writing to the stack, make sure the cache gets updated no
1556 matter what. Even if this write is not tagged as such, we still need
1557 to update the cache. */
1558
1559 if (res == TARGET_XFER_OK
1560 && inf != NULL
1561 && writebuf != NULL
1562 && target_dcache_init_p ()
1563 && !region->attrib.cache
1564 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1565 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1566 {
1567 DCACHE *dcache = target_dcache_get ();
1568
1569 dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
1570 }
1571
1572 /* If we still haven't got anything, return the last error. We
1573 give up. */
1574 return res;
1575 }
1576
1577 /* Perform a partial memory transfer. For docs see target.h,
1578 to_xfer_partial. */
1579
1580 static enum target_xfer_status
1581 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1582 gdb_byte *readbuf, const gdb_byte *writebuf,
1583 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1584 {
1585 enum target_xfer_status res;
1586
1587 /* Zero length requests are ok and require no work. */
1588 if (len == 0)
1589 return TARGET_XFER_EOF;
1590
1591 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1592 breakpoint insns, thus hiding from higher layers whether
1593 there are software breakpoints inserted in the code stream. */
1594 if (readbuf != NULL)
1595 {
1596 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1597 xfered_len);
1598
1599 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1600 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1601 }
1602 else
1603 {
1604 void *buf;
1605 struct cleanup *old_chain;
1606
1607 /* A large write request is likely to be partially satisfied
1608 by memory_xfer_partial_1. We will continually malloc
1609 and free a copy of the entire write request for breakpoint
1610 shadow handling even though we only end up writing a small
1611 subset of it. Cap writes to 4KB to mitigate this. */
1612 len = min (4096, len);
1613
1614 buf = xmalloc (len);
1615 old_chain = make_cleanup (xfree, buf);
1616 memcpy (buf, writebuf, len);
1617
1618 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1619 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1620 xfered_len);
1621
1622 do_cleanups (old_chain);
1623 }
1624
1625 return res;
1626 }
1627
1628 static void
1629 restore_show_memory_breakpoints (void *arg)
1630 {
1631 show_memory_breakpoints = (uintptr_t) arg;
1632 }
1633
1634 struct cleanup *
1635 make_show_memory_breakpoints_cleanup (int show)
1636 {
1637 int current = show_memory_breakpoints;
1638
1639 show_memory_breakpoints = show;
1640 return make_cleanup (restore_show_memory_breakpoints,
1641 (void *) (uintptr_t) current);
1642 }
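/* Illustrative sketch of the cleanup above: read memory with the
   breakpoint instructions GDB inserted left visible instead of the
   shadowed original bytes.  The function and parameter names are
   invented for the example; guarded by #if 0, never compiled.  */
#if 0
static void
sketch_read_with_breakpoints_shown (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

  /* While show_memory_breakpoints is 1, memory_xfer_partial does not
     replace breakpoint shadows in the data it returns.  */
  target_read_memory (addr, buf, len);

  /* Restore the previous setting.  */
  do_cleanups (old_chain);
}
#endif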
1643
1644 /* For docs see target.h, to_xfer_partial. */
1645
1646 enum target_xfer_status
1647 target_xfer_partial (struct target_ops *ops,
1648 enum target_object object, const char *annex,
1649 gdb_byte *readbuf, const gdb_byte *writebuf,
1650 ULONGEST offset, ULONGEST len,
1651 ULONGEST *xfered_len)
1652 {
1653 enum target_xfer_status retval;
1654
1655 gdb_assert (ops->to_xfer_partial != NULL);
1656
1657 /* Transfer is done when LEN is zero. */
1658 if (len == 0)
1659 return TARGET_XFER_EOF;
1660
1661 if (writebuf && !may_write_memory)
1662 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1663 core_addr_to_string_nz (offset), plongest (len));
1664
1665 *xfered_len = 0;
1666
1667 /* If this is a memory transfer, let the memory-specific code
1668 have a look at it instead. Memory transfers are more
1669 complicated. */
1670 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1671 || object == TARGET_OBJECT_CODE_MEMORY)
1672 retval = memory_xfer_partial (ops, object, readbuf,
1673 writebuf, offset, len, xfered_len);
1674 else if (object == TARGET_OBJECT_RAW_MEMORY)
1675 {
1676 /* Request the normal memory object from other layers. */
1677 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1678 xfered_len);
1679 }
1680 else
1681 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1682 writebuf, offset, len, xfered_len);
1683
1684 if (targetdebug)
1685 {
1686 const unsigned char *myaddr = NULL;
1687
1688 fprintf_unfiltered (gdb_stdlog,
1689 "%s:target_xfer_partial "
1690 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1691 ops->to_shortname,
1692 (int) object,
1693 (annex ? annex : "(null)"),
1694 host_address_to_string (readbuf),
1695 host_address_to_string (writebuf),
1696 core_addr_to_string_nz (offset),
1697 pulongest (len), retval,
1698 pulongest (*xfered_len));
1699
1700 if (readbuf)
1701 myaddr = readbuf;
1702 if (writebuf)
1703 myaddr = writebuf;
1704 if (retval == TARGET_XFER_OK && myaddr != NULL)
1705 {
1706 int i;
1707
1708 fputs_unfiltered (", bytes =", gdb_stdlog);
1709 for (i = 0; i < *xfered_len; i++)
1710 {
1711 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1712 {
1713 if (targetdebug < 2 && i > 0)
1714 {
1715 fprintf_unfiltered (gdb_stdlog, " ...");
1716 break;
1717 }
1718 fprintf_unfiltered (gdb_stdlog, "\n");
1719 }
1720
1721 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1722 }
1723 }
1724
1725 fputc_unfiltered ('\n', gdb_stdlog);
1726 }
1727
1728 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1729 properly. Do assertion after printing debug messages, so that we
1730 can find more clues on assertion failure from debugging messages. */
1731 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
1732 gdb_assert (*xfered_len > 0);
1733
1734 return retval;
1735 }
1736
1737 /* Read LEN bytes of target memory at address MEMADDR, placing the
1738 results in GDB's memory at MYADDR. Returns either 0 for success or
1739 TARGET_XFER_E_IO if any error occurs.
1740
1741 If an error occurs, no guarantee is made about the contents of the data at
1742 MYADDR. In particular, the caller should not depend upon partial reads
1743 filling the buffer with good data. There is no way for the caller to know
1744 how much good data might have been transferred anyway. Callers that can
1745 deal with partial reads should call target_read (which will retry until
1746 it makes no progress, and then return how much was transferred). */
1747
1748 int
1749 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1750 {
1751 /* Dispatch to the topmost target, not the flattened current_target.
1752 Memory accesses check target->to_has_(all_)memory, and the
1753 flattened target doesn't inherit those. */
1754 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1755 myaddr, memaddr, len) == len)
1756 return 0;
1757 else
1758 return TARGET_XFER_E_IO;
1759 }
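/* Illustrative sketch of a caller that checks target_read_memory's
   result before trusting the buffer, as required by the comment above.
   The function name sketch_read_u32 and its parameters are invented for
   the example; guarded by #if 0, never compiled.  */
#if 0
static int
sketch_read_u32 (struct gdbarch *gdbarch, CORE_ADDR addr, ULONGEST *result)
{
  gdb_byte buf[4];

  if (target_read_memory (addr, buf, sizeof buf) != 0)
    return -1;   /* Error; the contents of BUF are undefined.  */

  *result = extract_unsigned_integer (buf, sizeof buf,
                                      gdbarch_byte_order (gdbarch));
  return 0;
}
#endif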
1760
1761 /* Like target_read_memory, but specify explicitly that this is a read
1762 from the target's raw memory. That is, this read bypasses the
1763 dcache, breakpoint shadowing, etc. */
1764
1765 int
1766 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1767 {
1768 /* See comment in target_read_memory about why the request starts at
1769 current_target.beneath. */
1770 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1771 myaddr, memaddr, len) == len)
1772 return 0;
1773 else
1774 return TARGET_XFER_E_IO;
1775 }
1776
1777 /* Like target_read_memory, but specify explicitly that this is a read from
1778 the target's stack. This may trigger different cache behavior. */
1779
1780 int
1781 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1782 {
1783 /* See comment in target_read_memory about why the request starts at
1784 current_target.beneath. */
1785 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1786 myaddr, memaddr, len) == len)
1787 return 0;
1788 else
1789 return TARGET_XFER_E_IO;
1790 }
1791
1792 /* Like target_read_memory, but specify explicitly that this is a read from
1793 the target's code. This may trigger different cache behavior. */
1794
1795 int
1796 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1797 {
1798 /* See comment in target_read_memory about why the request starts at
1799 current_target.beneath. */
1800 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1801 myaddr, memaddr, len) == len)
1802 return 0;
1803 else
1804 return TARGET_XFER_E_IO;
1805 }
1806
1807 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1808 Returns either 0 for success or TARGET_XFER_E_IO if any
1809 error occurs. If an error occurs, no guarantee is made about how
1810 much data got written. Callers that can deal with partial writes
1811 should call target_write. */
1812
1813 int
1814 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1815 {
1816 /* See comment in target_read_memory about why the request starts at
1817 current_target.beneath. */
1818 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1819 myaddr, memaddr, len) == len)
1820 return 0;
1821 else
1822 return TARGET_XFER_E_IO;
1823 }
1824
1825 /* Write LEN bytes from MYADDR to target raw memory at address
1826 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1827 if any error occurs. If an error occurs, no guarantee is made
1828 about how much data got written. Callers that can deal with
1829 partial writes should call target_write. */
1830
1831 int
1832 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1833 {
1834 /* See comment in target_read_memory about why the request starts at
1835 current_target.beneath. */
1836 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1837 myaddr, memaddr, len) == len)
1838 return 0;
1839 else
1840 return TARGET_XFER_E_IO;
1841 }
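
/* A minimal usage sketch mirroring the read-side example above (the
   example_poke_bytes helper and its arguments are illustrative
   assumptions): write a caller-supplied buffer and report failure.  */

static int
example_poke_bytes (CORE_ADDR addr, const gdb_byte *bytes, ssize_t nbytes)
{
  if (target_write_memory (addr, bytes, nbytes) != 0)
    {
      warning (_("failed to write %s bytes at %s"),
               plongest (nbytes), hex_string (addr));
      return -1;
    }

  return 0;
}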
1842
1843 /* Fetch the target's memory map. */
1844
1845 VEC(mem_region_s) *
1846 target_memory_map (void)
1847 {
1848 VEC(mem_region_s) *result;
1849 struct mem_region *last_one, *this_one;
1850 int ix;
1851 struct target_ops *t;
1852
1853 if (targetdebug)
1854 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1855
1856 for (t = current_target.beneath; t != NULL; t = t->beneath)
1857 if (t->to_memory_map != NULL)
1858 break;
1859
1860 if (t == NULL)
1861 return NULL;
1862
1863 result = t->to_memory_map (t);
1864 if (result == NULL)
1865 return NULL;
1866
1867 qsort (VEC_address (mem_region_s, result),
1868 VEC_length (mem_region_s, result),
1869 sizeof (struct mem_region), mem_region_cmp);
1870
1871 /* Check that regions do not overlap. Simultaneously assign
1872 a numbering for the "mem" commands to use to refer to
1873 each region. */
1874 last_one = NULL;
1875 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1876 {
1877 this_one->number = ix;
1878
1879 if (last_one && last_one->hi > this_one->lo)
1880 {
1881 warning (_("Overlapping regions in memory map: ignoring"));
1882 VEC_free (mem_region_s, result);
1883 return NULL;
1884 }
1885 last_one = this_one;
1886 }
1887
1888 return result;
1889 }
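
/* A minimal usage sketch (the example_dump_memory_map helper is an
   illustrative assumption): walk the regions returned by
   target_memory_map, which the caller owns and must free.  */

static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;        /* Unsupported target, or overlapping regions.  */

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_filtered ("mem %d: %s - %s\n", r->number,
                     hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}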
1890
1891 void
1892 target_flash_erase (ULONGEST address, LONGEST length)
1893 {
1894 struct target_ops *t;
1895
1896 for (t = current_target.beneath; t != NULL; t = t->beneath)
1897 if (t->to_flash_erase != NULL)
1898 {
1899 if (targetdebug)
1900 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1901 hex_string (address), phex (length, 0));
1902 t->to_flash_erase (t, address, length);
1903 return;
1904 }
1905
1906 tcomplain ();
1907 }
1908
1909 void
1910 target_flash_done (void)
1911 {
1912 struct target_ops *t;
1913
1914 for (t = current_target.beneath; t != NULL; t = t->beneath)
1915 if (t->to_flash_done != NULL)
1916 {
1917 if (targetdebug)
1918 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1919 t->to_flash_done (t);
1920 return;
1921 }
1922
1923 tcomplain ();
1924 }
1925
1926 static void
1927 show_trust_readonly (struct ui_file *file, int from_tty,
1928 struct cmd_list_element *c, const char *value)
1929 {
1930 fprintf_filtered (file,
1931 _("Mode for reading from readonly sections is %s.\n"),
1932 value);
1933 }
1934
1935 /* More generic transfers. */
1936
1937 static enum target_xfer_status
1938 default_xfer_partial (struct target_ops *ops, enum target_object object,
1939 const char *annex, gdb_byte *readbuf,
1940 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1941 ULONGEST *xfered_len)
1942 {
1943 if (object == TARGET_OBJECT_MEMORY
1944 && ops->deprecated_xfer_memory != NULL)
1945 /* If available, fall back to the target's
1946 "deprecated_xfer_memory" method. */
1947 {
1948 int xfered = -1;
1949
1950 errno = 0;
1951 if (writebuf != NULL)
1952 {
1953 void *buffer = xmalloc (len);
1954 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1955
1956 memcpy (buffer, writebuf, len);
1957 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1958 1/*write*/, NULL, ops);
1959 do_cleanups (cleanup);
1960 }
1961 if (readbuf != NULL)
1962 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1963 0/*read*/, NULL, ops);
1964 if (xfered > 0)
1965 {
1966 *xfered_len = (ULONGEST) xfered;
1967          return TARGET_XFER_OK;
1968 }
1969 else if (xfered == 0 && errno == 0)
1970 /* "deprecated_xfer_memory" uses 0, cross checked against
1971 ERRNO as one indication of an error. */
1972 return TARGET_XFER_EOF;
1973 else
1974 return TARGET_XFER_E_IO;
1975 }
1976 else
1977 {
1978 gdb_assert (ops->beneath != NULL);
1979 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1980 readbuf, writebuf, offset, len,
1981 xfered_len);
1982 }
1983 }
1984
1985 /* Target vector read/write partial wrapper functions. */
1986
1987 static enum target_xfer_status
1988 target_read_partial (struct target_ops *ops,
1989 enum target_object object,
1990 const char *annex, gdb_byte *buf,
1991 ULONGEST offset, ULONGEST len,
1992 ULONGEST *xfered_len)
1993 {
1994 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1995 xfered_len);
1996 }
1997
1998 static enum target_xfer_status
1999 target_write_partial (struct target_ops *ops,
2000 enum target_object object,
2001 const char *annex, const gdb_byte *buf,
2002 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2003 {
2004 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2005 xfered_len);
2006 }
2007
2008 /* Wrappers to perform the full transfer. */
2009
2010 /* For docs on target_read see target.h. */
2011
2012 LONGEST
2013 target_read (struct target_ops *ops,
2014 enum target_object object,
2015 const char *annex, gdb_byte *buf,
2016 ULONGEST offset, LONGEST len)
2017 {
2018 LONGEST xfered = 0;
2019
2020 while (xfered < len)
2021 {
2022 ULONGEST xfered_len;
2023 enum target_xfer_status status;
2024
2025 status = target_read_partial (ops, object, annex,
2026 (gdb_byte *) buf + xfered,
2027 offset + xfered, len - xfered,
2028 &xfered_len);
2029
2030 /* Call an observer, notifying them of the xfer progress? */
2031 if (status == TARGET_XFER_EOF)
2032 return xfered;
2033 else if (status == TARGET_XFER_OK)
2034 {
2035 xfered += xfered_len;
2036 QUIT;
2037 }
2038 else
2039 return -1;
2040
2041 }
2042 return len;
2043 }
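
/* A minimal usage sketch (the example_read_auxv_prefix helper, the
   choice of TARGET_OBJECT_AUXV, and the caller-supplied buffer are
   illustrative assumptions): because target_read retries until it
   makes no progress, a short return here means end-of-object rather
   than a transient partial transfer.  */

static LONGEST
example_read_auxv_prefix (gdb_byte *buf, LONGEST len)
{
  LONGEST got = target_read (&current_target, TARGET_OBJECT_AUXV, NULL,
                             buf, 0, len);

  if (got < 0)
    warning (_("error reading the auxiliary vector"));

  return got;
}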
2044
2045 /* Assuming that the entire [begin, end) range of memory cannot be
2046 read, try to read whatever subrange is possible to read.
2047
2048 The function returns, in RESULT, either zero or one memory block.
2049 If there's a readable subrange at the beginning, it is completely
2050 read and returned. Any further readable subrange will not be read.
2051 Otherwise, if there's a readable subrange at the end, it will be
2052 completely read and returned. Any readable subranges before it
2053    (obviously, not starting at the beginning) will be ignored.  In
2054    other cases -- either no readable subrange, or readable subrange(s)
2055    that are neither at the beginning nor at the end -- nothing is returned.
2056
2057 The purpose of this function is to handle a read across a boundary
2058 of accessible memory in a case when memory map is not available.
2059 The above restrictions are fine for this case, but will give
2060 incorrect results if the memory is 'patchy'. However, supporting
2061 'patchy' memory would require trying to read every single byte,
2062    and that seems an unacceptable solution.  An explicit memory map is
2063    recommended for this case -- and read_memory_robust will
2064 take care of reading multiple ranges then. */
2065
2066 static void
2067 read_whatever_is_readable (struct target_ops *ops,
2068 ULONGEST begin, ULONGEST end,
2069 VEC(memory_read_result_s) **result)
2070 {
2071 gdb_byte *buf = xmalloc (end - begin);
2072 ULONGEST current_begin = begin;
2073 ULONGEST current_end = end;
2074 int forward;
2075 memory_read_result_s r;
2076 ULONGEST xfered_len;
2077
2078 /* If we previously failed to read 1 byte, nothing can be done here. */
2079 if (end - begin <= 1)
2080 {
2081 xfree (buf);
2082 return;
2083 }
2084
2085 /* Check that either first or the last byte is readable, and give up
2086 if not. This heuristic is meant to permit reading accessible memory
2087      at the boundary of an accessible region.  */
2088 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2089 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
2090 {
2091 forward = 1;
2092 ++current_begin;
2093 }
2094 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2095 buf + (end-begin) - 1, end - 1, 1,
2096 &xfered_len) == TARGET_XFER_OK)
2097 {
2098 forward = 0;
2099 --current_end;
2100 }
2101 else
2102 {
2103 xfree (buf);
2104 return;
2105 }
2106
2107 /* Loop invariant is that the [current_begin, current_end) was previously
2108 found to be not readable as a whole.
2109
2110 Note loop condition -- if the range has 1 byte, we can't divide the range
2111 so there's no point trying further. */
2112 while (current_end - current_begin > 1)
2113 {
2114 ULONGEST first_half_begin, first_half_end;
2115 ULONGEST second_half_begin, second_half_end;
2116 LONGEST xfer;
2117 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2118
2119 if (forward)
2120 {
2121 first_half_begin = current_begin;
2122 first_half_end = middle;
2123 second_half_begin = middle;
2124 second_half_end = current_end;
2125 }
2126 else
2127 {
2128 first_half_begin = middle;
2129 first_half_end = current_end;
2130 second_half_begin = current_begin;
2131 second_half_end = middle;
2132 }
2133
2134 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2135 buf + (first_half_begin - begin),
2136 first_half_begin,
2137 first_half_end - first_half_begin);
2138
2139 if (xfer == first_half_end - first_half_begin)
2140 {
2141 /* This half reads up fine. So, the error must be in the
2142 other half. */
2143 current_begin = second_half_begin;
2144 current_end = second_half_end;
2145 }
2146 else
2147 {
2148 /* This half is not readable. Because we've tried one byte, we
2149             know some part of this half is actually readable.  Go to the next
2150 iteration to divide again and try to read.
2151
2152 We don't handle the other half, because this function only tries
2153 to read a single readable subrange. */
2154 current_begin = first_half_begin;
2155 current_end = first_half_end;
2156 }
2157 }
2158
2159 if (forward)
2160 {
2161 /* The [begin, current_begin) range has been read. */
2162 r.begin = begin;
2163 r.end = current_begin;
2164 r.data = buf;
2165 }
2166 else
2167 {
2168 /* The [current_end, end) range has been read. */
2169 LONGEST rlen = end - current_end;
2170
2171 r.data = xmalloc (rlen);
2172 memcpy (r.data, buf + current_end - begin, rlen);
2173 r.begin = current_end;
2174 r.end = end;
2175 xfree (buf);
2176 }
2177 VEC_safe_push(memory_read_result_s, (*result), &r);
2178 }
2179
2180 void
2181 free_memory_read_result_vector (void *x)
2182 {
2183 VEC(memory_read_result_s) *v = x;
2184 memory_read_result_s *current;
2185 int ix;
2186
2187 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2188 {
2189 xfree (current->data);
2190 }
2191 VEC_free (memory_read_result_s, v);
2192 }
2193
2194 VEC(memory_read_result_s) *
2195 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2196 {
2197 VEC(memory_read_result_s) *result = 0;
2198
2199 LONGEST xfered = 0;
2200 while (xfered < len)
2201 {
2202 struct mem_region *region = lookup_mem_region (offset + xfered);
2203 LONGEST rlen;
2204
2205 /* If there is no explicit region, a fake one should be created. */
2206 gdb_assert (region);
2207
2208 if (region->hi == 0)
2209 rlen = len - xfered;
2210 else
2211          rlen = region->hi - (offset + xfered);
2212
2213 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2214 {
2215 /* Cannot read this region. Note that we can end up here only
2216 if the region is explicitly marked inaccessible, or
2217 'inaccessible-by-default' is in effect. */
2218 xfered += rlen;
2219 }
2220 else
2221 {
2222 LONGEST to_read = min (len - xfered, rlen);
2223 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2224
2225 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2226 (gdb_byte *) buffer,
2227 offset + xfered, to_read);
2228 /* Call an observer, notifying them of the xfer progress? */
2229 if (xfer <= 0)
2230 {
2231 /* Got an error reading full chunk. See if maybe we can read
2232 some subrange. */
2233 xfree (buffer);
2234 read_whatever_is_readable (ops, offset + xfered,
2235 offset + xfered + to_read, &result);
2236 xfered += to_read;
2237 }
2238 else
2239 {
2240 struct memory_read_result r;
2241 r.data = buffer;
2242 r.begin = offset + xfered;
2243 r.end = r.begin + xfer;
2244 VEC_safe_push (memory_read_result_s, result, &r);
2245 xfered += xfer;
2246 }
2247 QUIT;
2248 }
2249 }
2250 return result;
2251 }
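
/* A minimal usage sketch (the example_dump_robust_read helper and its
   arguments are illustrative assumptions): consume the blocks returned
   by read_memory_robust and release them through
   free_memory_read_result_vector.  */

static void
example_dump_robust_read (struct target_ops *ops, ULONGEST start, LONGEST len)
{
  VEC(memory_read_result_s) *blocks = read_memory_robust (ops, start, len);
  struct cleanup *cleanup
    = make_cleanup (free_memory_read_result_vector, blocks);
  memory_read_result_s *r;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, blocks, ix, r); ix++)
    printf_filtered ("readable: %s - %s\n",
                     pulongest (r->begin), pulongest (r->end));

  do_cleanups (cleanup);
}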
2252
2253
2254 /* An alternative to target_write with progress callbacks. */
2255
2256 LONGEST
2257 target_write_with_progress (struct target_ops *ops,
2258 enum target_object object,
2259 const char *annex, const gdb_byte *buf,
2260 ULONGEST offset, LONGEST len,
2261 void (*progress) (ULONGEST, void *), void *baton)
2262 {
2263 LONGEST xfered = 0;
2264
2265 /* Give the progress callback a chance to set up. */
2266 if (progress)
2267 (*progress) (0, baton);
2268
2269 while (xfered < len)
2270 {
2271 ULONGEST xfered_len;
2272 enum target_xfer_status status;
2273
2274 status = target_write_partial (ops, object, annex,
2275 (gdb_byte *) buf + xfered,
2276 offset + xfered, len - xfered,
2277 &xfered_len);
2278
2279 if (status == TARGET_XFER_EOF)
2280 return xfered;
2281 if (TARGET_XFER_STATUS_ERROR_P (status))
2282 return -1;
2283
2284 gdb_assert (status == TARGET_XFER_OK);
2285 if (progress)
2286 (*progress) (xfered_len, baton);
2287
2288 xfered += xfered_len;
2289 QUIT;
2290 }
2291 return len;
2292 }
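
/* A minimal usage sketch (the example_* helpers, the running-total
   baton, and the choice of TARGET_OBJECT_FLASH are illustrative
   assumptions): a progress callback receives the size of each
   successful partial write.  */

static void
example_write_progress (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
  fprintf_unfiltered (gdb_stdlog, "wrote %s bytes so far\n",
                      pulongest (*total));
}

static LONGEST
example_write_flash (const gdb_byte *buf, ULONGEST address, LONGEST len)
{
  ULONGEST total = 0;

  return target_write_with_progress (current_target.beneath,
                                     TARGET_OBJECT_FLASH, NULL, buf,
                                     address, len,
                                     example_write_progress, &total);
}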
2293
2294 /* For docs on target_write see target.h. */
2295
2296 LONGEST
2297 target_write (struct target_ops *ops,
2298 enum target_object object,
2299 const char *annex, const gdb_byte *buf,
2300 ULONGEST offset, LONGEST len)
2301 {
2302 return target_write_with_progress (ops, object, annex, buf, offset, len,
2303 NULL, NULL);
2304 }
2305
2306 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2307 the size of the transferred data. PADDING additional bytes are
2308 available in *BUF_P. This is a helper function for
2309 target_read_alloc; see the declaration of that function for more
2310 information. */
2311
2312 static LONGEST
2313 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2314 const char *annex, gdb_byte **buf_p, int padding)
2315 {
2316 size_t buf_alloc, buf_pos;
2317 gdb_byte *buf;
2318
2319 /* This function does not have a length parameter; it reads the
2320      entire OBJECT.  Also, it doesn't support objects fetched partly
2321 from one target and partly from another (in a different stratum,
2322 e.g. a core file and an executable). Both reasons make it
2323 unsuitable for reading memory. */
2324 gdb_assert (object != TARGET_OBJECT_MEMORY);
2325
2326 /* Start by reading up to 4K at a time. The target will throttle
2327 this number down if necessary. */
2328 buf_alloc = 4096;
2329 buf = xmalloc (buf_alloc);
2330 buf_pos = 0;
2331 while (1)
2332 {
2333 ULONGEST xfered_len;
2334 enum target_xfer_status status;
2335
2336 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2337 buf_pos, buf_alloc - buf_pos - padding,
2338 &xfered_len);
2339
2340 if (status == TARGET_XFER_EOF)
2341 {
2342 /* Read all there was. */
2343 if (buf_pos == 0)
2344 xfree (buf);
2345 else
2346 *buf_p = buf;
2347 return buf_pos;
2348 }
2349 else if (status != TARGET_XFER_OK)
2350 {
2351 /* An error occurred. */
2352 xfree (buf);
2353 return TARGET_XFER_E_IO;
2354 }
2355
2356 buf_pos += xfered_len;
2357
2358 /* If the buffer is filling up, expand it. */
2359 if (buf_alloc < buf_pos * 2)
2360 {
2361 buf_alloc *= 2;
2362 buf = xrealloc (buf, buf_alloc);
2363 }
2364
2365 QUIT;
2366 }
2367 }
2368
2369 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2370 the size of the transferred data. See the declaration in "target.h"
2371    for more information about the return value.  */
2372
2373 LONGEST
2374 target_read_alloc (struct target_ops *ops, enum target_object object,
2375 const char *annex, gdb_byte **buf_p)
2376 {
2377 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2378 }
2379
2380 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2381 returned as a string, allocated using xmalloc. If an error occurs
2382 or the transfer is unsupported, NULL is returned. Empty objects
2383 are returned as allocated but empty strings. A warning is issued
2384 if the result contains any embedded NUL bytes. */
2385
2386 char *
2387 target_read_stralloc (struct target_ops *ops, enum target_object object,
2388 const char *annex)
2389 {
2390 gdb_byte *buffer;
2391 char *bufstr;
2392 LONGEST i, transferred;
2393
2394 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2395 bufstr = (char *) buffer;
2396
2397 if (transferred < 0)
2398 return NULL;
2399
2400 if (transferred == 0)
2401 return xstrdup ("");
2402
2403 bufstr[transferred] = 0;
2404
2405 /* Check for embedded NUL bytes; but allow trailing NULs. */
2406 for (i = strlen (bufstr); i < transferred; i++)
2407 if (bufstr[i] != 0)
2408 {
2409 warning (_("target object %d, annex %s, "
2410 "contained unexpected null characters"),
2411 (int) object, annex ? annex : "(none)");
2412 break;
2413 }
2414
2415 return bufstr;
2416 }
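
/* A minimal usage sketch (the example_print_osdata_processes helper
   and the "processes" annex are illustrative assumptions): read an
   entire object as a string and release it with a cleanup.  */

static void
example_print_osdata_processes (void)
{
  char *text = target_read_stralloc (&current_target, TARGET_OBJECT_OSDATA,
                                     "processes");

  if (text != NULL)
    {
      struct cleanup *cleanup = make_cleanup (xfree, text);

      printf_filtered ("%s", text);
      do_cleanups (cleanup);
    }
}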
2417
2418 /* Memory transfer methods. */
2419
2420 void
2421 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2422 LONGEST len)
2423 {
2424 /* This method is used to read from an alternate, non-current
2425 target. This read must bypass the overlay support (as symbols
2426 don't match this target), and GDB's internal cache (wrong cache
2427 for this target). */
2428 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2429 != len)
2430 memory_error (TARGET_XFER_E_IO, addr);
2431 }
2432
2433 ULONGEST
2434 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2435 int len, enum bfd_endian byte_order)
2436 {
2437 gdb_byte buf[sizeof (ULONGEST)];
2438
2439 gdb_assert (len <= sizeof (buf));
2440 get_target_memory (ops, addr, buf, len);
2441 return extract_unsigned_integer (buf, len, byte_order);
2442 }
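
/* A minimal usage sketch (the example_read_pointer helper and its
   arguments are illustrative assumptions): read a pointer-sized
   unsigned value from an alternate target, e.g. a stratum beneath the
   current one.  */

static CORE_ADDR
example_read_pointer (struct target_ops *ops, CORE_ADDR addr)
{
  int ptr_len = gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT;

  return get_target_memory_unsigned (ops, addr, ptr_len,
                                     gdbarch_byte_order (target_gdbarch ()));
}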
2443
2444 /* See target.h. */
2445
2446 int
2447 target_insert_breakpoint (struct gdbarch *gdbarch,
2448 struct bp_target_info *bp_tgt)
2449 {
2450 if (!may_insert_breakpoints)
2451 {
2452 warning (_("May not insert breakpoints"));
2453 return 1;
2454 }
2455
2456 return current_target.to_insert_breakpoint (&current_target,
2457 gdbarch, bp_tgt);
2458 }
2459
2460 /* See target.h. */
2461
2462 int
2463 target_remove_breakpoint (struct gdbarch *gdbarch,
2464 struct bp_target_info *bp_tgt)
2465 {
2466 /* This is kind of a weird case to handle, but the permission might
2467 have been changed after breakpoints were inserted - in which case
2468 we should just take the user literally and assume that any
2469 breakpoints should be left in place. */
2470 if (!may_insert_breakpoints)
2471 {
2472 warning (_("May not remove breakpoints"));
2473 return 1;
2474 }
2475
2476 return current_target.to_remove_breakpoint (&current_target,
2477 gdbarch, bp_tgt);
2478 }
2479
2480 static void
2481 target_info (char *args, int from_tty)
2482 {
2483 struct target_ops *t;
2484 int has_all_mem = 0;
2485
2486 if (symfile_objfile != NULL)
2487 printf_unfiltered (_("Symbols from \"%s\".\n"),
2488 objfile_name (symfile_objfile));
2489
2490 for (t = target_stack; t != NULL; t = t->beneath)
2491 {
2492 if (!(*t->to_has_memory) (t))
2493 continue;
2494
2495 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2496 continue;
2497 if (has_all_mem)
2498 printf_unfiltered (_("\tWhile running this, "
2499 "GDB does not access memory from...\n"));
2500 printf_unfiltered ("%s:\n", t->to_longname);
2501 (t->to_files_info) (t);
2502 has_all_mem = (*t->to_has_all_memory) (t);
2503 }
2504 }
2505
2506 /* This function is called before any new inferior is created, e.g.
2507 by running a program, attaching, or connecting to a target.
2508 It cleans up any state from previous invocations which might
2509 change between runs. This is a subset of what target_preopen
2510 resets (things which might change between targets). */
2511
2512 void
2513 target_pre_inferior (int from_tty)
2514 {
2515 /* Clear out solib state. Otherwise the solib state of the previous
2516 inferior might have survived and is entirely wrong for the new
2517 target. This has been observed on GNU/Linux using glibc 2.3. How
2518 to reproduce:
2519
2520 bash$ ./foo&
2521 [1] 4711
2522 bash$ ./foo&
2523 [1] 4712
2524 bash$ gdb ./foo
2525 [...]
2526 (gdb) attach 4711
2527 (gdb) detach
2528 (gdb) attach 4712
2529 Cannot access memory at address 0xdeadbeef
2530 */
2531
2532 /* In some OSs, the shared library list is the same/global/shared
2533 across inferiors. If code is shared between processes, so are
2534 memory regions and features. */
2535 if (!gdbarch_has_global_solist (target_gdbarch ()))
2536 {
2537 no_shared_libraries (NULL, from_tty);
2538
2539 invalidate_target_mem_regions ();
2540
2541 target_clear_description ();
2542 }
2543
2544 agent_capability_invalidate ();
2545 }
2546
2547 /* Callback for iterate_over_inferiors. Gets rid of the given
2548 inferior. */
2549
2550 static int
2551 dispose_inferior (struct inferior *inf, void *args)
2552 {
2553 struct thread_info *thread;
2554
2555 thread = any_thread_of_process (inf->pid);
2556 if (thread)
2557 {
2558 switch_to_thread (thread->ptid);
2559
2560 /* Core inferiors actually should be detached, not killed. */
2561 if (target_has_execution)
2562 target_kill ();
2563 else
2564 target_detach (NULL, 0);
2565 }
2566
2567 return 0;
2568 }
2569
2570 /* This is to be called by the open routine before it does
2571 anything. */
2572
2573 void
2574 target_preopen (int from_tty)
2575 {
2576 dont_repeat ();
2577
2578 if (have_inferiors ())
2579 {
2580 if (!from_tty
2581 || !have_live_inferiors ()
2582 || query (_("A program is being debugged already. Kill it? ")))
2583 iterate_over_inferiors (dispose_inferior, NULL);
2584 else
2585 error (_("Program not killed."));
2586 }
2587
2588 /* Calling target_kill may remove the target from the stack. But if
2589 it doesn't (which seems like a win for UDI), remove it now. */
2590 /* Leave the exec target, though. The user may be switching from a
2591 live process to a core of the same program. */
2592 pop_all_targets_above (file_stratum);
2593
2594 target_pre_inferior (from_tty);
2595 }
2596
2597 /* Detach a target after doing deferred register stores. */
2598
2599 void
2600 target_detach (const char *args, int from_tty)
2601 {
2602 struct target_ops* t;
2603
2604 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2605 /* Don't remove global breakpoints here. They're removed on
2606 disconnection from the target. */
2607 ;
2608 else
2609 /* If we're in breakpoints-always-inserted mode, have to remove
2610 them before detaching. */
2611 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2612
2613 prepare_for_detach ();
2614
2615 current_target.to_detach (&current_target, args, from_tty);
2616 if (targetdebug)
2617 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2618 args, from_tty);
2619 }
2620
2621 void
2622 target_disconnect (char *args, int from_tty)
2623 {
2624 struct target_ops *t;
2625
2626 /* If we're in breakpoints-always-inserted mode or if breakpoints
2627 are global across processes, we have to remove them before
2628 disconnecting. */
2629 remove_breakpoints ();
2630
2631 for (t = current_target.beneath; t != NULL; t = t->beneath)
2632 if (t->to_disconnect != NULL)
2633 {
2634 if (targetdebug)
2635 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2636 args, from_tty);
2637 t->to_disconnect (t, args, from_tty);
2638 return;
2639 }
2640
2641 tcomplain ();
2642 }
2643
2644 ptid_t
2645 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2646 {
2647 struct target_ops *t;
2648 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2649 status, options);
2650
2651 if (targetdebug)
2652 {
2653 char *status_string;
2654 char *options_string;
2655
2656 status_string = target_waitstatus_to_string (status);
2657 options_string = target_options_to_string (options);
2658 fprintf_unfiltered (gdb_stdlog,
2659 "target_wait (%d, status, options={%s})"
2660 " = %d, %s\n",
2661 ptid_get_pid (ptid), options_string,
2662 ptid_get_pid (retval), status_string);
2663 xfree (status_string);
2664 xfree (options_string);
2665 }
2666
2667 return retval;
2668 }
2669
2670 char *
2671 target_pid_to_str (ptid_t ptid)
2672 {
2673 struct target_ops *t;
2674
2675 for (t = current_target.beneath; t != NULL; t = t->beneath)
2676 {
2677 if (t->to_pid_to_str != NULL)
2678 return (*t->to_pid_to_str) (t, ptid);
2679 }
2680
2681 return normal_pid_to_str (ptid);
2682 }
2683
2684 char *
2685 target_thread_name (struct thread_info *info)
2686 {
2687 struct target_ops *t;
2688
2689 for (t = current_target.beneath; t != NULL; t = t->beneath)
2690 {
2691 if (t->to_thread_name != NULL)
2692 return (*t->to_thread_name) (t, info);
2693 }
2694
2695 return NULL;
2696 }
2697
2698 void
2699 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2700 {
2701 struct target_ops *t;
2702
2703 target_dcache_invalidate ();
2704
2705 current_target.to_resume (&current_target, ptid, step, signal);
2706 if (targetdebug)
2707 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2708 ptid_get_pid (ptid),
2709 step ? "step" : "continue",
2710 gdb_signal_to_name (signal));
2711
2712 registers_changed_ptid (ptid);
2713 set_executing (ptid, 1);
2714 set_running (ptid, 1);
2715 clear_inline_frame_state (ptid);
2716 }
2717
2718 void
2719 target_pass_signals (int numsigs, unsigned char *pass_signals)
2720 {
2721 struct target_ops *t;
2722
2723 for (t = current_target.beneath; t != NULL; t = t->beneath)
2724 {
2725 if (t->to_pass_signals != NULL)
2726 {
2727 if (targetdebug)
2728 {
2729 int i;
2730
2731 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2732 numsigs);
2733
2734 for (i = 0; i < numsigs; i++)
2735 if (pass_signals[i])
2736 fprintf_unfiltered (gdb_stdlog, " %s",
2737 gdb_signal_to_name (i));
2738
2739 fprintf_unfiltered (gdb_stdlog, " })\n");
2740 }
2741
2742 (*t->to_pass_signals) (t, numsigs, pass_signals);
2743 return;
2744 }
2745 }
2746 }
2747
2748 void
2749 target_program_signals (int numsigs, unsigned char *program_signals)
2750 {
2751 struct target_ops *t;
2752
2753 for (t = current_target.beneath; t != NULL; t = t->beneath)
2754 {
2755 if (t->to_program_signals != NULL)
2756 {
2757 if (targetdebug)
2758 {
2759 int i;
2760
2761 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2762 numsigs);
2763
2764 for (i = 0; i < numsigs; i++)
2765 if (program_signals[i])
2766 fprintf_unfiltered (gdb_stdlog, " %s",
2767 gdb_signal_to_name (i));
2768
2769 fprintf_unfiltered (gdb_stdlog, " })\n");
2770 }
2771
2772 (*t->to_program_signals) (t, numsigs, program_signals);
2773 return;
2774 }
2775 }
2776 }
2777
2778 /* Look through the list of possible targets for a target that can
2779 follow forks. */
2780
2781 int
2782 target_follow_fork (int follow_child, int detach_fork)
2783 {
2784 struct target_ops *t;
2785
2786 for (t = current_target.beneath; t != NULL; t = t->beneath)
2787 {
2788 if (t->to_follow_fork != NULL)
2789 {
2790 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2791
2792 if (targetdebug)
2793 fprintf_unfiltered (gdb_stdlog,
2794 "target_follow_fork (%d, %d) = %d\n",
2795 follow_child, detach_fork, retval);
2796 return retval;
2797 }
2798 }
2799
2800 /* Some target returned a fork event, but did not know how to follow it. */
2801 internal_error (__FILE__, __LINE__,
2802 _("could not find a target to follow fork"));
2803 }
2804
2805 void
2806 target_mourn_inferior (void)
2807 {
2808 struct target_ops *t;
2809
2810 for (t = current_target.beneath; t != NULL; t = t->beneath)
2811 {
2812 if (t->to_mourn_inferior != NULL)
2813 {
2814 t->to_mourn_inferior (t);
2815 if (targetdebug)
2816 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2817
2818 /* We no longer need to keep handles on any of the object files.
2819 Make sure to release them to avoid unnecessarily locking any
2820 of them while we're not actually debugging. */
2821 bfd_cache_close_all ();
2822
2823 return;
2824 }
2825 }
2826
2827 internal_error (__FILE__, __LINE__,
2828 _("could not find a target to follow mourn inferior"));
2829 }
2830
2831 /* Look for a target which can describe architectural features, starting
2832 from TARGET. If we find one, return its description. */
2833
2834 const struct target_desc *
2835 target_read_description (struct target_ops *target)
2836 {
2837 struct target_ops *t;
2838
2839 for (t = target; t != NULL; t = t->beneath)
2840 if (t->to_read_description != NULL)
2841 {
2842 const struct target_desc *tdesc;
2843
2844 tdesc = t->to_read_description (t);
2845 if (tdesc)
2846 return tdesc;
2847 }
2848
2849 return NULL;
2850 }
2851
2852 /* The default implementation of to_search_memory.
2853 This implements a basic search of memory, reading target memory and
2854    performing the search here (as opposed to performing the search on the
2855 target side with, for example, gdbserver). */
2856
2857 int
2858 simple_search_memory (struct target_ops *ops,
2859 CORE_ADDR start_addr, ULONGEST search_space_len,
2860 const gdb_byte *pattern, ULONGEST pattern_len,
2861 CORE_ADDR *found_addrp)
2862 {
2863 /* NOTE: also defined in find.c testcase. */
2864 #define SEARCH_CHUNK_SIZE 16000
2865 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2866 /* Buffer to hold memory contents for searching. */
2867 gdb_byte *search_buf;
2868 unsigned search_buf_size;
2869 struct cleanup *old_cleanups;
2870
2871 search_buf_size = chunk_size + pattern_len - 1;
2872
2873 /* No point in trying to allocate a buffer larger than the search space. */
2874 if (search_space_len < search_buf_size)
2875 search_buf_size = search_space_len;
2876
2877 search_buf = malloc (search_buf_size);
2878 if (search_buf == NULL)
2879 error (_("Unable to allocate memory to perform the search."));
2880 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2881
2882 /* Prime the search buffer. */
2883
2884 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2885 search_buf, start_addr, search_buf_size) != search_buf_size)
2886 {
2887 warning (_("Unable to access %s bytes of target "
2888 "memory at %s, halting search."),
2889 pulongest (search_buf_size), hex_string (start_addr));
2890 do_cleanups (old_cleanups);
2891 return -1;
2892 }
2893
2894 /* Perform the search.
2895
2896 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2897 When we've scanned N bytes we copy the trailing bytes to the start and
2898 read in another N bytes. */
2899
2900 while (search_space_len >= pattern_len)
2901 {
2902 gdb_byte *found_ptr;
2903 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2904
2905 found_ptr = memmem (search_buf, nr_search_bytes,
2906 pattern, pattern_len);
2907
2908 if (found_ptr != NULL)
2909 {
2910 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2911
2912 *found_addrp = found_addr;
2913 do_cleanups (old_cleanups);
2914 return 1;
2915 }
2916
2917 /* Not found in this chunk, skip to next chunk. */
2918
2919 /* Don't let search_space_len wrap here, it's unsigned. */
2920 if (search_space_len >= chunk_size)
2921 search_space_len -= chunk_size;
2922 else
2923 search_space_len = 0;
2924
2925 if (search_space_len >= pattern_len)
2926 {
2927 unsigned keep_len = search_buf_size - chunk_size;
2928 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2929 int nr_to_read;
2930
2931 /* Copy the trailing part of the previous iteration to the front
2932 of the buffer for the next iteration. */
2933 gdb_assert (keep_len == pattern_len - 1);
2934 memcpy (search_buf, search_buf + chunk_size, keep_len);
2935
2936 nr_to_read = min (search_space_len - keep_len, chunk_size);
2937
2938 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2939 search_buf + keep_len, read_addr,
2940 nr_to_read) != nr_to_read)
2941 {
2942 warning (_("Unable to access %s bytes of target "
2943 "memory at %s, halting search."),
2944 plongest (nr_to_read),
2945 hex_string (read_addr));
2946 do_cleanups (old_cleanups);
2947 return -1;
2948 }
2949
2950 start_addr += chunk_size;
2951 }
2952 }
2953
2954 /* Not found. */
2955
2956 do_cleanups (old_cleanups);
2957 return 0;
2958 }
2959
2960 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2961 sequence of bytes in PATTERN with length PATTERN_LEN.
2962
2963 The result is 1 if found, 0 if not found, and -1 if there was an error
2964 requiring halting of the search (e.g. memory read error).
2965 If the pattern is found the address is recorded in FOUND_ADDRP. */
2966
2967 int
2968 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2969 const gdb_byte *pattern, ULONGEST pattern_len,
2970 CORE_ADDR *found_addrp)
2971 {
2972 struct target_ops *t;
2973 int found;
2974
2975 /* We don't use INHERIT to set current_target.to_search_memory,
2976 so we have to scan the target stack and handle targetdebug
2977 ourselves. */
2978
2979 if (targetdebug)
2980 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2981 hex_string (start_addr));
2982
2983 for (t = current_target.beneath; t != NULL; t = t->beneath)
2984 if (t->to_search_memory != NULL)
2985 break;
2986
2987 if (t != NULL)
2988 {
2989 found = t->to_search_memory (t, start_addr, search_space_len,
2990 pattern, pattern_len, found_addrp);
2991 }
2992 else
2993 {
2994 /* If a special version of to_search_memory isn't available, use the
2995 simple version. */
2996 found = simple_search_memory (current_target.beneath,
2997 start_addr, search_space_len,
2998 pattern, pattern_len, found_addrp);
2999 }
3000
3001 if (targetdebug)
3002 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3003
3004 return found;
3005 }
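
/* A minimal usage sketch (the example_find_magic helper, the ELF magic
   pattern, and the search bounds are illustrative assumptions): look
   for a byte sequence in target memory and report the result codes
   documented above.  */

static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0x7f, 'E', 'L', 'F' };
  CORE_ADDR found;
  int rc = target_search_memory (start, space_len,
                                 magic, sizeof magic, &found);

  if (rc == 1)
    printf_filtered ("pattern found at %s\n", hex_string (found));
  else if (rc == 0)
    printf_filtered ("pattern not found\n");
  else
    warning (_("memory error while searching"));
}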
3006
3007 /* Look through the currently pushed targets. If none of them will
3008 be able to restart the currently running process, issue an error
3009 message. */
3010
3011 void
3012 target_require_runnable (void)
3013 {
3014 struct target_ops *t;
3015
3016 for (t = target_stack; t != NULL; t = t->beneath)
3017 {
3018 /* If this target knows how to create a new program, then
3019 assume we will still be able to after killing the current
3020 one. Either killing and mourning will not pop T, or else
3021 find_default_run_target will find it again. */
3022 if (t->to_create_inferior != NULL)
3023 return;
3024
3025 /* Do not worry about thread_stratum targets that can not
3026 create inferiors. Assume they will be pushed again if
3027 necessary, and continue to the process_stratum. */
3028 if (t->to_stratum == thread_stratum
3029 || t->to_stratum == arch_stratum)
3030 continue;
3031
3032 error (_("The \"%s\" target does not support \"run\". "
3033 "Try \"help target\" or \"continue\"."),
3034 t->to_shortname);
3035 }
3036
3037 /* This function is only called if the target is running. In that
3038 case there should have been a process_stratum target and it
3039 should either know how to create inferiors, or not... */
3040 internal_error (__FILE__, __LINE__, _("No targets found"));
3041 }
3042
3043 /* Look through the list of possible targets for a target that can
3044 execute a run or attach command without any other data. This is
3045 used to locate the default process stratum.
3046
3047 If DO_MESG is not NULL, the result is always valid (error() is
3048 called for errors); else, return NULL on error. */
3049
3050 static struct target_ops *
3051 find_default_run_target (char *do_mesg)
3052 {
3053 struct target_ops **t;
3054 struct target_ops *runable = NULL;
3055 int count;
3056
3057 count = 0;
3058
3059 for (t = target_structs; t < target_structs + target_struct_size;
3060 ++t)
3061 {
3062 if ((*t)->to_can_run && target_can_run (*t))
3063 {
3064 runable = *t;
3065 ++count;
3066 }
3067 }
3068
3069 if (count != 1)
3070 {
3071 if (do_mesg)
3072 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3073 else
3074 return NULL;
3075 }
3076
3077 return runable;
3078 }
3079
3080 void
3081 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3082 {
3083 struct target_ops *t;
3084
3085 t = find_default_run_target ("attach");
3086 (t->to_attach) (t, args, from_tty);
3087 return;
3088 }
3089
3090 void
3091 find_default_create_inferior (struct target_ops *ops,
3092 char *exec_file, char *allargs, char **env,
3093 int from_tty)
3094 {
3095 struct target_ops *t;
3096
3097 t = find_default_run_target ("run");
3098 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3099 return;
3100 }
3101
3102 static int
3103 find_default_can_async_p (struct target_ops *ignore)
3104 {
3105 struct target_ops *t;
3106
3107 /* This may be called before the target is pushed on the stack;
3108 look for the default process stratum. If there's none, gdb isn't
3109 configured with a native debugger, and target remote isn't
3110 connected yet. */
3111 t = find_default_run_target (NULL);
3112 if (t && t->to_can_async_p != delegate_can_async_p)
3113 return (t->to_can_async_p) (t);
3114 return 0;
3115 }
3116
3117 static int
3118 find_default_is_async_p (struct target_ops *ignore)
3119 {
3120 struct target_ops *t;
3121
3122 /* This may be called before the target is pushed on the stack;
3123 look for the default process stratum. If there's none, gdb isn't
3124 configured with a native debugger, and target remote isn't
3125 connected yet. */
3126 t = find_default_run_target (NULL);
3127 if (t && t->to_is_async_p != delegate_is_async_p)
3128 return (t->to_is_async_p) (t);
3129 return 0;
3130 }
3131
3132 static int
3133 find_default_supports_non_stop (struct target_ops *self)
3134 {
3135 struct target_ops *t;
3136
3137 t = find_default_run_target (NULL);
3138 if (t && t->to_supports_non_stop)
3139 return (t->to_supports_non_stop) (t);
3140 return 0;
3141 }
3142
3143 int
3144 target_supports_non_stop (void)
3145 {
3146 struct target_ops *t;
3147
3148 for (t = &current_target; t != NULL; t = t->beneath)
3149 if (t->to_supports_non_stop)
3150 return t->to_supports_non_stop (t);
3151
3152 return 0;
3153 }
3154
3155 /* Implement the "info proc" command. */
3156
3157 int
3158 target_info_proc (char *args, enum info_proc_what what)
3159 {
3160 struct target_ops *t;
3161
3162 /* If we're already connected to something that can get us OS
3163 related data, use it. Otherwise, try using the native
3164 target. */
3165 if (current_target.to_stratum >= process_stratum)
3166 t = current_target.beneath;
3167 else
3168 t = find_default_run_target (NULL);
3169
3170 for (; t != NULL; t = t->beneath)
3171 {
3172 if (t->to_info_proc != NULL)
3173 {
3174 t->to_info_proc (t, args, what);
3175
3176 if (targetdebug)
3177 fprintf_unfiltered (gdb_stdlog,
3178 "target_info_proc (\"%s\", %d)\n", args, what);
3179
3180 return 1;
3181 }
3182 }
3183
3184 return 0;
3185 }
3186
3187 static int
3188 find_default_supports_disable_randomization (struct target_ops *self)
3189 {
3190 struct target_ops *t;
3191
3192 t = find_default_run_target (NULL);
3193 if (t && t->to_supports_disable_randomization)
3194 return (t->to_supports_disable_randomization) (t);
3195 return 0;
3196 }
3197
3198 int
3199 target_supports_disable_randomization (void)
3200 {
3201 struct target_ops *t;
3202
3203 for (t = &current_target; t != NULL; t = t->beneath)
3204 if (t->to_supports_disable_randomization)
3205 return t->to_supports_disable_randomization (t);
3206
3207 return 0;
3208 }
3209
3210 char *
3211 target_get_osdata (const char *type)
3212 {
3213 struct target_ops *t;
3214
3215 /* If we're already connected to something that can get us OS
3216 related data, use it. Otherwise, try using the native
3217 target. */
3218 if (current_target.to_stratum >= process_stratum)
3219 t = current_target.beneath;
3220 else
3221 t = find_default_run_target ("get OS data");
3222
3223 if (!t)
3224 return NULL;
3225
3226 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3227 }
3228
3229 /* Determine the current address space of thread PTID. */
3230
3231 struct address_space *
3232 target_thread_address_space (ptid_t ptid)
3233 {
3234 struct address_space *aspace;
3235 struct inferior *inf;
3236 struct target_ops *t;
3237
3238 for (t = current_target.beneath; t != NULL; t = t->beneath)
3239 {
3240 if (t->to_thread_address_space != NULL)
3241 {
3242 aspace = t->to_thread_address_space (t, ptid);
3243 gdb_assert (aspace);
3244
3245 if (targetdebug)
3246 fprintf_unfiltered (gdb_stdlog,
3247 "target_thread_address_space (%s) = %d\n",
3248 target_pid_to_str (ptid),
3249 address_space_num (aspace));
3250 return aspace;
3251 }
3252 }
3253
3254 /* Fall-back to the "main" address space of the inferior. */
3255 inf = find_inferior_pid (ptid_get_pid (ptid));
3256
3257 if (inf == NULL || inf->aspace == NULL)
3258 internal_error (__FILE__, __LINE__,
3259 _("Can't determine the current "
3260 "address space of thread %s\n"),
3261 target_pid_to_str (ptid));
3262
3263 return inf->aspace;
3264 }
3265
3266
3267 /* Target file operations. */
3268
3269 static struct target_ops *
3270 default_fileio_target (void)
3271 {
3272 /* If we're already connected to something that can perform
3273 file I/O, use it. Otherwise, try using the native target. */
3274 if (current_target.to_stratum >= process_stratum)
3275 return current_target.beneath;
3276 else
3277 return find_default_run_target ("file I/O");
3278 }
3279
3280 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3281 target file descriptor, or -1 if an error occurs (and set
3282 *TARGET_ERRNO). */
3283 int
3284 target_fileio_open (const char *filename, int flags, int mode,
3285 int *target_errno)
3286 {
3287 struct target_ops *t;
3288
3289 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3290 {
3291 if (t->to_fileio_open != NULL)
3292 {
3293 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3294
3295 if (targetdebug)
3296 fprintf_unfiltered (gdb_stdlog,
3297 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3298 filename, flags, mode,
3299 fd, fd != -1 ? 0 : *target_errno);
3300 return fd;
3301 }
3302 }
3303
3304 *target_errno = FILEIO_ENOSYS;
3305 return -1;
3306 }
3307
3308 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3309 Return the number of bytes written, or -1 if an error occurs
3310 (and set *TARGET_ERRNO). */
3311 int
3312 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3313 ULONGEST offset, int *target_errno)
3314 {
3315 struct target_ops *t;
3316
3317 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3318 {
3319 if (t->to_fileio_pwrite != NULL)
3320 {
3321 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3322 target_errno);
3323
3324 if (targetdebug)
3325 fprintf_unfiltered (gdb_stdlog,
3326 "target_fileio_pwrite (%d,...,%d,%s) "
3327 "= %d (%d)\n",
3328 fd, len, pulongest (offset),
3329 ret, ret != -1 ? 0 : *target_errno);
3330 return ret;
3331 }
3332 }
3333
3334 *target_errno = FILEIO_ENOSYS;
3335 return -1;
3336 }
3337
3338 /* Read up to LEN bytes from FD on the target into READ_BUF.
3339 Return the number of bytes read, or -1 if an error occurs
3340 (and set *TARGET_ERRNO). */
3341 int
3342 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3343 ULONGEST offset, int *target_errno)
3344 {
3345 struct target_ops *t;
3346
3347 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3348 {
3349 if (t->to_fileio_pread != NULL)
3350 {
3351 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3352 target_errno);
3353
3354 if (targetdebug)
3355 fprintf_unfiltered (gdb_stdlog,
3356 "target_fileio_pread (%d,...,%d,%s) "
3357 "= %d (%d)\n",
3358 fd, len, pulongest (offset),
3359 ret, ret != -1 ? 0 : *target_errno);
3360 return ret;
3361 }
3362 }
3363
3364 *target_errno = FILEIO_ENOSYS;
3365 return -1;
3366 }
3367
3368 /* Close FD on the target. Return 0, or -1 if an error occurs
3369 (and set *TARGET_ERRNO). */
3370 int
3371 target_fileio_close (int fd, int *target_errno)
3372 {
3373 struct target_ops *t;
3374
3375 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3376 {
3377 if (t->to_fileio_close != NULL)
3378 {
3379 int ret = t->to_fileio_close (t, fd, target_errno);
3380
3381 if (targetdebug)
3382 fprintf_unfiltered (gdb_stdlog,
3383 "target_fileio_close (%d) = %d (%d)\n",
3384 fd, ret, ret != -1 ? 0 : *target_errno);
3385 return ret;
3386 }
3387 }
3388
3389 *target_errno = FILEIO_ENOSYS;
3390 return -1;
3391 }
3392
3393 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3394 occurs (and set *TARGET_ERRNO). */
3395 int
3396 target_fileio_unlink (const char *filename, int *target_errno)
3397 {
3398 struct target_ops *t;
3399
3400 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3401 {
3402 if (t->to_fileio_unlink != NULL)
3403 {
3404 int ret = t->to_fileio_unlink (t, filename, target_errno);
3405
3406 if (targetdebug)
3407 fprintf_unfiltered (gdb_stdlog,
3408 "target_fileio_unlink (%s) = %d (%d)\n",
3409 filename, ret, ret != -1 ? 0 : *target_errno);
3410 return ret;
3411 }
3412 }
3413
3414 *target_errno = FILEIO_ENOSYS;
3415 return -1;
3416 }
3417
3418 /* Read value of symbolic link FILENAME on the target. Return a
3419 null-terminated string allocated via xmalloc, or NULL if an error
3420 occurs (and set *TARGET_ERRNO). */
3421 char *
3422 target_fileio_readlink (const char *filename, int *target_errno)
3423 {
3424 struct target_ops *t;
3425
3426 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3427 {
3428 if (t->to_fileio_readlink != NULL)
3429 {
3430 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3431
3432 if (targetdebug)
3433 fprintf_unfiltered (gdb_stdlog,
3434 "target_fileio_readlink (%s) = %s (%d)\n",
3435 filename, ret? ret : "(nil)",
3436 ret? 0 : *target_errno);
3437 return ret;
3438 }
3439 }
3440
3441 *target_errno = FILEIO_ENOSYS;
3442 return NULL;
3443 }
3444
3445 static void
3446 target_fileio_close_cleanup (void *opaque)
3447 {
3448 int fd = *(int *) opaque;
3449 int target_errno;
3450
3451 target_fileio_close (fd, &target_errno);
3452 }
3453
3454 /* Read target file FILENAME. Store the result in *BUF_P and
3455 return the size of the transferred data. PADDING additional bytes are
3456 available in *BUF_P. This is a helper function for
3457 target_fileio_read_alloc; see the declaration of that function for more
3458 information. */
3459
3460 static LONGEST
3461 target_fileio_read_alloc_1 (const char *filename,
3462 gdb_byte **buf_p, int padding)
3463 {
3464 struct cleanup *close_cleanup;
3465 size_t buf_alloc, buf_pos;
3466 gdb_byte *buf;
3467 LONGEST n;
3468 int fd;
3469 int target_errno;
3470
3471 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3472 if (fd == -1)
3473 return -1;
3474
3475 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3476
3477 /* Start by reading up to 4K at a time. The target will throttle
3478 this number down if necessary. */
3479 buf_alloc = 4096;
3480 buf = xmalloc (buf_alloc);
3481 buf_pos = 0;
3482 while (1)
3483 {
3484 n = target_fileio_pread (fd, &buf[buf_pos],
3485 buf_alloc - buf_pos - padding, buf_pos,
3486 &target_errno);
3487 if (n < 0)
3488 {
3489 /* An error occurred. */
3490 do_cleanups (close_cleanup);
3491 xfree (buf);
3492 return -1;
3493 }
3494 else if (n == 0)
3495 {
3496 /* Read all there was. */
3497 do_cleanups (close_cleanup);
3498 if (buf_pos == 0)
3499 xfree (buf);
3500 else
3501 *buf_p = buf;
3502 return buf_pos;
3503 }
3504
3505 buf_pos += n;
3506
3507 /* If the buffer is filling up, expand it. */
3508 if (buf_alloc < buf_pos * 2)
3509 {
3510 buf_alloc *= 2;
3511 buf = xrealloc (buf, buf_alloc);
3512 }
3513
3514 QUIT;
3515 }
3516 }
3517
3518 /* Read target file FILENAME. Store the result in *BUF_P and return
3519 the size of the transferred data. See the declaration in "target.h"
3520    for more information about the return value.  */
3521
3522 LONGEST
3523 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3524 {
3525 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3526 }
3527
3528 /* Read target file FILENAME. The result is NUL-terminated and
3529 returned as a string, allocated using xmalloc. If an error occurs
3530 or the transfer is unsupported, NULL is returned. Empty objects
3531 are returned as allocated but empty strings. A warning is issued
3532 if the result contains any embedded NUL bytes. */
3533
3534 char *
3535 target_fileio_read_stralloc (const char *filename)
3536 {
3537 gdb_byte *buffer;
3538 char *bufstr;
3539 LONGEST i, transferred;
3540
3541 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3542 bufstr = (char *) buffer;
3543
3544 if (transferred < 0)
3545 return NULL;
3546
3547 if (transferred == 0)
3548 return xstrdup ("");
3549
3550 bufstr[transferred] = 0;
3551
3552 /* Check for embedded NUL bytes; but allow trailing NULs. */
3553 for (i = strlen (bufstr); i < transferred; i++)
3554 if (bufstr[i] != 0)
3555 {
3556 warning (_("target file %s "
3557 "contained unexpected null characters"),
3558 filename);
3559 break;
3560 }
3561
3562 return bufstr;
3563 }
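
/* A minimal usage sketch (the example_show_target_file helper and the
   caller-supplied PATH are illustrative assumptions): fetch a text
   file from the target's filesystem, falling back gracefully when
   file I/O is unsupported.  */

static void
example_show_target_file (const char *path)
{
  char *text = target_fileio_read_stralloc (path);
  struct cleanup *cleanup;

  if (text == NULL)
    {
      printf_filtered ("cannot read %s on the target\n", path);
      return;
    }

  cleanup = make_cleanup (xfree, text);
  printf_filtered ("%s", text);
  do_cleanups (cleanup);
}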
3564
3565
3566 static int
3567 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3568 CORE_ADDR addr, int len)
3569 {
3570 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3571 }
3572
3573 static int
3574 default_watchpoint_addr_within_range (struct target_ops *target,
3575 CORE_ADDR addr,
3576 CORE_ADDR start, int length)
3577 {
3578 return addr >= start && addr < start + length;
3579 }
3580
3581 static struct gdbarch *
3582 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3583 {
3584 return target_gdbarch ();
3585 }
3586
3587 static int
3588 return_zero (void)
3589 {
3590 return 0;
3591 }
3592
3593 static int
3594 return_one (void)
3595 {
3596 return 1;
3597 }
3598
3599 static int
3600 return_minus_one (void)
3601 {
3602 return -1;
3603 }
3604
3605 static void *
3606 return_null (void)
3607 {
3608 return 0;
3609 }
3610
3611 /*
3612 * Find the next target down the stack from the specified target.
3613 */
3614
3615 struct target_ops *
3616 find_target_beneath (struct target_ops *t)
3617 {
3618 return t->beneath;
3619 }
3620
3621 /* See target.h. */
3622
3623 struct target_ops *
3624 find_target_at (enum strata stratum)
3625 {
3626 struct target_ops *t;
3627
3628 for (t = current_target.beneath; t != NULL; t = t->beneath)
3629 if (t->to_stratum == stratum)
3630 return t;
3631
3632 return NULL;
3633 }
3634
3635 \f
3636 /* The inferior process has died. Long live the inferior! */
3637
3638 void
3639 generic_mourn_inferior (void)
3640 {
3641 ptid_t ptid;
3642
3643 ptid = inferior_ptid;
3644 inferior_ptid = null_ptid;
3645
3646 /* Mark breakpoints uninserted in case something tries to delete a
3647 breakpoint while we delete the inferior's threads (which would
3648 fail, since the inferior is long gone). */
3649 mark_breakpoints_out ();
3650
3651 if (!ptid_equal (ptid, null_ptid))
3652 {
3653 int pid = ptid_get_pid (ptid);
3654 exit_inferior (pid);
3655 }
3656
3657 /* Note this wipes step-resume breakpoints, so needs to be done
3658 after exit_inferior, which ends up referencing the step-resume
3659 breakpoints through clear_thread_inferior_resources. */
3660 breakpoint_init_inferior (inf_exited);
3661
3662 registers_changed ();
3663
3664 reopen_exec_file ();
3665 reinit_frame_cache ();
3666
3667 if (deprecated_detach_hook)
3668 deprecated_detach_hook ();
3669 }
3670 \f
3671 /* Convert a normal process ID to a string. Returns the string in a
3672 static buffer. */
3673
3674 char *
3675 normal_pid_to_str (ptid_t ptid)
3676 {
3677 static char buf[32];
3678
3679 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3680 return buf;
3681 }
3682
3683 static char *
3684 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3685 {
3686 return normal_pid_to_str (ptid);
3687 }
3688
3689 /* Error-catcher for target_find_memory_regions. */
3690 static int
3691 dummy_find_memory_regions (struct target_ops *self,
3692 find_memory_region_ftype ignore1, void *ignore2)
3693 {
3694 error (_("Command not implemented for this target."));
3695 return 0;
3696 }
3697
3698 /* Error-catcher for target_make_corefile_notes. */
3699 static char *
3700 dummy_make_corefile_notes (struct target_ops *self,
3701 bfd *ignore1, int *ignore2)
3702 {
3703 error (_("Command not implemented for this target."));
3704 return NULL;
3705 }
3706
3707 /* Error-catcher for target_get_bookmark. */
3708 static gdb_byte *
3709 dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
3710 {
3711 tcomplain ();
3712 return NULL;
3713 }
3714
3715 /* Error-catcher for target_goto_bookmark. */
3716 static void
3717 dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
3718 {
3719 tcomplain ();
3720 }
3721
3722 /* Set up the handful of non-empty slots needed by the dummy target
3723 vector. */
3724
3725 static void
3726 init_dummy_target (void)
3727 {
3728 dummy_target.to_shortname = "None";
3729 dummy_target.to_longname = "None";
3730 dummy_target.to_doc = "";
3731 dummy_target.to_create_inferior = find_default_create_inferior;
3732 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3733 dummy_target.to_supports_disable_randomization
3734 = find_default_supports_disable_randomization;
3735 dummy_target.to_pid_to_str = dummy_pid_to_str;
3736 dummy_target.to_stratum = dummy_stratum;
3737 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3738 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3739 dummy_target.to_get_bookmark = dummy_get_bookmark;
3740 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3741 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3742 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3743 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3744 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3745 dummy_target.to_has_execution
3746 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3747 dummy_target.to_magic = OPS_MAGIC;
3748
3749 install_dummy_methods (&dummy_target);
3750 }
3751 \f
3752 static void
3753 debug_to_open (char *args, int from_tty)
3754 {
3755 debug_target.to_open (args, from_tty);
3756
3757 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3758 }
3759
3760 void
3761 target_close (struct target_ops *targ)
3762 {
3763 gdb_assert (!target_is_pushed (targ));
3764
3765 if (targ->to_xclose != NULL)
3766 targ->to_xclose (targ);
3767 else if (targ->to_close != NULL)
3768 targ->to_close (targ);
3769
3770 if (targetdebug)
3771 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3772 }
3773
3774 void
3775 target_attach (char *args, int from_tty)
3776 {
3777 current_target.to_attach (&current_target, args, from_tty);
3778 if (targetdebug)
3779 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3780 args, from_tty);
3781 }
3782
3783 int
3784 target_thread_alive (ptid_t ptid)
3785 {
3786 struct target_ops *t;
3787
3788 for (t = current_target.beneath; t != NULL; t = t->beneath)
3789 {
3790 if (t->to_thread_alive != NULL)
3791 {
3792 int retval;
3793
3794 retval = t->to_thread_alive (t, ptid);
3795 if (targetdebug)
3796 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3797 ptid_get_pid (ptid), retval);
3798
3799 return retval;
3800 }
3801 }
3802
3803 return 0;
3804 }
3805
3806 void
3807 target_find_new_threads (void)
3808 {
3809 struct target_ops *t;
3810
3811 for (t = current_target.beneath; t != NULL; t = t->beneath)
3812 {
3813 if (t->to_find_new_threads != NULL)
3814 {
3815 t->to_find_new_threads (t);
3816 if (targetdebug)
3817 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3818
3819 return;
3820 }
3821 }
3822 }
3823
3824 void
3825 target_stop (ptid_t ptid)
3826 {
3827 if (!may_stop)
3828 {
3829 warning (_("May not interrupt or stop the target, ignoring attempt"));
3830 return;
3831 }
3832
3833 (*current_target.to_stop) (&current_target, ptid);
3834 }
3835
3836 static void
3837 debug_to_post_attach (struct target_ops *self, int pid)
3838 {
3839 debug_target.to_post_attach (&debug_target, pid);
3840
3841 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3842 }
3843
3844 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3845 result. The LIST incoming argument is released. */
3846
3847 static char *
3848 str_comma_list_concat_elem (char *list, const char *elem)
3849 {
3850 if (list == NULL)
3851 return xstrdup (elem);
3852 else
3853 return reconcat (list, list, ", ", elem, (char *) NULL);
3854 }
3855
3856 /* Helper for target_options_to_string. If OPT is present in
3857 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3858 Returns the new resulting string. OPT is removed from
3859 TARGET_OPTIONS. */
3860
3861 static char *
3862 do_option (int *target_options, char *ret,
3863 int opt, char *opt_str)
3864 {
3865 if ((*target_options & opt) != 0)
3866 {
3867 ret = str_comma_list_concat_elem (ret, opt_str);
3868 *target_options &= ~opt;
3869 }
3870
3871 return ret;
3872 }
3873
3874 char *
3875 target_options_to_string (int target_options)
3876 {
3877 char *ret = NULL;
3878
3879 #define DO_TARG_OPTION(OPT) \
3880 ret = do_option (&target_options, ret, OPT, #OPT)
3881
3882 DO_TARG_OPTION (TARGET_WNOHANG);
3883
3884 if (target_options != 0)
3885 ret = str_comma_list_concat_elem (ret, "unknown???");
3886
3887 if (ret == NULL)
3888 ret = xstrdup ("");
3889 return ret;
3890 }
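
/* For example, with the single option bit handled above (a sketch; the
   surrounding statements are hypothetical):

     char *str = target_options_to_string (TARGET_WNOHANG);
     fprintf_unfiltered (gdb_stdlog, "options: [%s]\n", str);
     xfree (str);

   prints "options: [TARGET_WNOHANG]".  Any bit still set after the
   DO_TARG_OPTION calls is reported as "unknown???", and the caller owns
   (and should xfree) the returned string.  */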
3891
3892 static void
3893 debug_print_register (const char * func,
3894 struct regcache *regcache, int regno)
3895 {
3896 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3897
3898 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3899 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3900 && gdbarch_register_name (gdbarch, regno) != NULL
3901 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3902 fprintf_unfiltered (gdb_stdlog, "(%s)",
3903 gdbarch_register_name (gdbarch, regno));
3904 else
3905 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3906 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3907 {
3908 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3909 int i, size = register_size (gdbarch, regno);
3910 gdb_byte buf[MAX_REGISTER_SIZE];
3911
3912 regcache_raw_collect (regcache, regno, buf);
3913 fprintf_unfiltered (gdb_stdlog, " = ");
3914 for (i = 0; i < size; i++)
3915 {
3916 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3917 }
3918 if (size <= sizeof (LONGEST))
3919 {
3920 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3921
3922 fprintf_unfiltered (gdb_stdlog, " %s %s",
3923 core_addr_to_string_nz (val), plongest (val));
3924 }
3925 }
3926 fprintf_unfiltered (gdb_stdlog, "\n");
3927 }
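
/* With "set debug target 1", a fetch of an 8-byte little-endian
   register might be logged roughly as follows (the register name and
   value here are made up):

     target_fetch_registers (rip) = 2d05400000000000 0x40052d 4195629

   i.e. the raw bytes in target byte order, then, when the register fits
   in a LONGEST, the value rendered as an address and as a decimal
   integer.  */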
3928
3929 void
3930 target_fetch_registers (struct regcache *regcache, int regno)
3931 {
3932 struct target_ops *t;
3933
3934 for (t = current_target.beneath; t != NULL; t = t->beneath)
3935 {
3936 if (t->to_fetch_registers != NULL)
3937 {
3938 t->to_fetch_registers (t, regcache, regno);
3939 if (targetdebug)
3940 debug_print_register ("target_fetch_registers", regcache, regno);
3941 return;
3942 }
3943 }
3944 }
3945
3946 void
3947 target_store_registers (struct regcache *regcache, int regno)
3948 {
3949 struct target_ops *t;
3950
3951 if (!may_write_registers)
3952 error (_("Writing to registers is not allowed (regno %d)"), regno);
3953
3954 current_target.to_store_registers (&current_target, regcache, regno);
3955 if (targetdebug)
3956 {
3957 debug_print_register ("target_store_registers", regcache, regno);
3958 }
3959 }
3960
3961 int
3962 target_core_of_thread (ptid_t ptid)
3963 {
3964 struct target_ops *t;
3965
3966 for (t = current_target.beneath; t != NULL; t = t->beneath)
3967 {
3968 if (t->to_core_of_thread != NULL)
3969 {
3970 int retval = t->to_core_of_thread (t, ptid);
3971
3972 if (targetdebug)
3973 fprintf_unfiltered (gdb_stdlog,
3974 "target_core_of_thread (%d) = %d\n",
3975 ptid_get_pid (ptid), retval);
3976 return retval;
3977 }
3978 }
3979
3980 return -1;
3981 }
3982
3983 int
3984 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3985 {
3986 struct target_ops *t;
3987
3988 for (t = current_target.beneath; t != NULL; t = t->beneath)
3989 {
3990 if (t->to_verify_memory != NULL)
3991 {
3992 int retval = t->to_verify_memory (t, data, memaddr, size);
3993
3994 if (targetdebug)
3995 fprintf_unfiltered (gdb_stdlog,
3996 "target_verify_memory (%s, %s) = %d\n",
3997 paddress (target_gdbarch (), memaddr),
3998 pulongest (size),
3999 retval);
4000 return retval;
4001 }
4002 }
4003
4004 tcomplain ();
4005 }
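
/* Sketch of a caller (the variable names are illustrative): compare a
   locally held copy of a region against what the target actually
   contains.

     int matched = target_verify_memory (my_copy, addr, size);

   A target such as the remote one can implement to_verify_memory by
   asking the stub for a checksum, avoiding a read-back of the whole
   region.  */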
4006
4007 /* The documentation for this function is in its prototype declaration in
4008 target.h. */
4009
4010 int
4011 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4012 {
4013 struct target_ops *t;
4014
4015 for (t = current_target.beneath; t != NULL; t = t->beneath)
4016 if (t->to_insert_mask_watchpoint != NULL)
4017 {
4018 int ret;
4019
4020 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4021
4022 if (targetdebug)
4023 fprintf_unfiltered (gdb_stdlog, "\
4024 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4025 core_addr_to_string (addr),
4026 core_addr_to_string (mask), rw, ret);
4027
4028 return ret;
4029 }
4030
4031 return 1;
4032 }
4033
4034 /* The documentation for this function is in its prototype declaration in
4035 target.h. */
4036
4037 int
4038 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4039 {
4040 struct target_ops *t;
4041
4042 for (t = current_target.beneath; t != NULL; t = t->beneath)
4043 if (t->to_remove_mask_watchpoint != NULL)
4044 {
4045 int ret;
4046
4047 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4048
4049 if (targetdebug)
4050 fprintf_unfiltered (gdb_stdlog, "\
4051 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4052 core_addr_to_string (addr),
4053 core_addr_to_string (mask), rw, ret);
4054
4055 return ret;
4056 }
4057
4058 return 1;
4059 }
4060
4061 /* The documentation for this function is in its prototype declaration
4062 in target.h. */
4063
4064 int
4065 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4066 {
4067 struct target_ops *t;
4068
4069 for (t = current_target.beneath; t != NULL; t = t->beneath)
4070 if (t->to_masked_watch_num_registers != NULL)
4071 return t->to_masked_watch_num_registers (t, addr, mask);
4072
4073 return -1;
4074 }
4075
4076 /* The documentation for this function is in its prototype declaration
4077 in target.h. */
4078
4079 int
4080 target_ranged_break_num_registers (void)
4081 {
4082 struct target_ops *t;
4083
4084 for (t = current_target.beneath; t != NULL; t = t->beneath)
4085 if (t->to_ranged_break_num_registers != NULL)
4086 return t->to_ranged_break_num_registers (t);
4087
4088 return -1;
4089 }
4090
4091 /* See target.h. */
4092
4093 struct btrace_target_info *
4094 target_enable_btrace (ptid_t ptid)
4095 {
4096 struct target_ops *t;
4097
4098 for (t = current_target.beneath; t != NULL; t = t->beneath)
4099 if (t->to_enable_btrace != NULL)
4100 return t->to_enable_btrace (t, ptid);
4101
4102 tcomplain ();
4103 return NULL;
4104 }
4105
4106 /* See target.h. */
4107
4108 void
4109 target_disable_btrace (struct btrace_target_info *btinfo)
4110 {
4111 struct target_ops *t;
4112
4113 for (t = current_target.beneath; t != NULL; t = t->beneath)
4114 if (t->to_disable_btrace != NULL)
4115 {
4116 t->to_disable_btrace (t, btinfo);
4117 return;
4118 }
4119
4120 tcomplain ();
4121 }
4122
4123 /* See target.h. */
4124
4125 void
4126 target_teardown_btrace (struct btrace_target_info *btinfo)
4127 {
4128 struct target_ops *t;
4129
4130 for (t = current_target.beneath; t != NULL; t = t->beneath)
4131 if (t->to_teardown_btrace != NULL)
4132 {
4133 t->to_teardown_btrace (t, btinfo);
4134 return;
4135 }
4136
4137 tcomplain ();
4138 }
4139
4140 /* See target.h. */
4141
4142 enum btrace_error
4143 target_read_btrace (VEC (btrace_block_s) **btrace,
4144 struct btrace_target_info *btinfo,
4145 enum btrace_read_type type)
4146 {
4147 struct target_ops *t;
4148
4149 for (t = current_target.beneath; t != NULL; t = t->beneath)
4150 if (t->to_read_btrace != NULL)
4151 return t->to_read_btrace (t, btrace, btinfo, type);
4152
4153 tcomplain ();
4154 return BTRACE_ERR_NOT_SUPPORTED;
4155 }
4156
4157 /* See target.h. */
4158
4159 void
4160 target_stop_recording (void)
4161 {
4162 struct target_ops *t;
4163
4164 for (t = current_target.beneath; t != NULL; t = t->beneath)
4165 if (t->to_stop_recording != NULL)
4166 {
4167 t->to_stop_recording (t);
4168 return;
4169 }
4170
4171 /* This is optional. */
4172 }
4173
4174 /* See target.h. */
4175
4176 void
4177 target_info_record (void)
4178 {
4179 struct target_ops *t;
4180
4181 for (t = current_target.beneath; t != NULL; t = t->beneath)
4182 if (t->to_info_record != NULL)
4183 {
4184 t->to_info_record (t);
4185 return;
4186 }
4187
4188 tcomplain ();
4189 }
4190
4191 /* See target.h. */
4192
4193 void
4194 target_save_record (const char *filename)
4195 {
4196 struct target_ops *t;
4197
4198 for (t = current_target.beneath; t != NULL; t = t->beneath)
4199 if (t->to_save_record != NULL)
4200 {
4201 t->to_save_record (t, filename);
4202 return;
4203 }
4204
4205 tcomplain ();
4206 }
4207
4208 /* See target.h. */
4209
4210 int
4211 target_supports_delete_record (void)
4212 {
4213 struct target_ops *t;
4214
4215 for (t = current_target.beneath; t != NULL; t = t->beneath)
4216 if (t->to_delete_record != NULL)
4217 return 1;
4218
4219 return 0;
4220 }
4221
4222 /* See target.h. */
4223
4224 void
4225 target_delete_record (void)
4226 {
4227 struct target_ops *t;
4228
4229 for (t = current_target.beneath; t != NULL; t = t->beneath)
4230 if (t->to_delete_record != NULL)
4231 {
4232 t->to_delete_record (t);
4233 return;
4234 }
4235
4236 tcomplain ();
4237 }
4238
4239 /* See target.h. */
4240
4241 int
4242 target_record_is_replaying (void)
4243 {
4244 struct target_ops *t;
4245
4246 for (t = current_target.beneath; t != NULL; t = t->beneath)
4247 if (t->to_record_is_replaying != NULL)
4248 return t->to_record_is_replaying (t);
4249
4250 return 0;
4251 }
4252
4253 /* See target.h. */
4254
4255 void
4256 target_goto_record_begin (void)
4257 {
4258 struct target_ops *t;
4259
4260 for (t = current_target.beneath; t != NULL; t = t->beneath)
4261 if (t->to_goto_record_begin != NULL)
4262 {
4263 t->to_goto_record_begin (t);
4264 return;
4265 }
4266
4267 tcomplain ();
4268 }
4269
4270 /* See target.h. */
4271
4272 void
4273 target_goto_record_end (void)
4274 {
4275 struct target_ops *t;
4276
4277 for (t = current_target.beneath; t != NULL; t = t->beneath)
4278 if (t->to_goto_record_end != NULL)
4279 {
4280 t->to_goto_record_end (t);
4281 return;
4282 }
4283
4284 tcomplain ();
4285 }
4286
4287 /* See target.h. */
4288
4289 void
4290 target_goto_record (ULONGEST insn)
4291 {
4292 struct target_ops *t;
4293
4294 for (t = current_target.beneath; t != NULL; t = t->beneath)
4295 if (t->to_goto_record != NULL)
4296 {
4297 t->to_goto_record (t, insn);
4298 return;
4299 }
4300
4301 tcomplain ();
4302 }
4303
4304 /* See target.h. */
4305
4306 void
4307 target_insn_history (int size, int flags)
4308 {
4309 struct target_ops *t;
4310
4311 for (t = current_target.beneath; t != NULL; t = t->beneath)
4312 if (t->to_insn_history != NULL)
4313 {
4314 t->to_insn_history (t, size, flags);
4315 return;
4316 }
4317
4318 tcomplain ();
4319 }
4320
4321 /* See target.h. */
4322
4323 void
4324 target_insn_history_from (ULONGEST from, int size, int flags)
4325 {
4326 struct target_ops *t;
4327
4328 for (t = current_target.beneath; t != NULL; t = t->beneath)
4329 if (t->to_insn_history_from != NULL)
4330 {
4331 t->to_insn_history_from (t, from, size, flags);
4332 return;
4333 }
4334
4335 tcomplain ();
4336 }
4337
4338 /* See target.h. */
4339
4340 void
4341 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4342 {
4343 struct target_ops *t;
4344
4345 for (t = current_target.beneath; t != NULL; t = t->beneath)
4346 if (t->to_insn_history_range != NULL)
4347 {
4348 t->to_insn_history_range (t, begin, end, flags);
4349 return;
4350 }
4351
4352 tcomplain ();
4353 }
4354
4355 /* See target.h. */
4356
4357 void
4358 target_call_history (int size, int flags)
4359 {
4360 struct target_ops *t;
4361
4362 for (t = current_target.beneath; t != NULL; t = t->beneath)
4363 if (t->to_call_history != NULL)
4364 {
4365 t->to_call_history (t, size, flags);
4366 return;
4367 }
4368
4369 tcomplain ();
4370 }
4371
4372 /* See target.h. */
4373
4374 void
4375 target_call_history_from (ULONGEST begin, int size, int flags)
4376 {
4377 struct target_ops *t;
4378
4379 for (t = current_target.beneath; t != NULL; t = t->beneath)
4380 if (t->to_call_history_from != NULL)
4381 {
4382 t->to_call_history_from (t, begin, size, flags);
4383 return;
4384 }
4385
4386 tcomplain ();
4387 }
4388
4389 /* See target.h. */
4390
4391 void
4392 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4393 {
4394 struct target_ops *t;
4395
4396 for (t = current_target.beneath; t != NULL; t = t->beneath)
4397 if (t->to_call_history_range != NULL)
4398 {
4399 t->to_call_history_range (t, begin, end, flags);
4400 return;
4401 }
4402
4403 tcomplain ();
4404 }
4405
4406 static void
4407 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4408 {
4409 debug_target.to_prepare_to_store (&debug_target, regcache);
4410
4411 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4412 }
4413
4414 /* See target.h. */
4415
4416 const struct frame_unwind *
4417 target_get_unwinder (void)
4418 {
4419 struct target_ops *t;
4420
4421 for (t = current_target.beneath; t != NULL; t = t->beneath)
4422 if (t->to_get_unwinder != NULL)
4423 return t->to_get_unwinder;
4424
4425 return NULL;
4426 }
4427
4428 /* See target.h. */
4429
4430 const struct frame_unwind *
4431 target_get_tailcall_unwinder (void)
4432 {
4433 struct target_ops *t;
4434
4435 for (t = current_target.beneath; t != NULL; t = t->beneath)
4436 if (t->to_get_tailcall_unwinder != NULL)
4437 return t->to_get_tailcall_unwinder;
4438
4439 return NULL;
4440 }
4441
4442 /* See target.h. */
4443
4444 CORE_ADDR
4445 forward_target_decr_pc_after_break (struct target_ops *ops,
4446 struct gdbarch *gdbarch)
4447 {
4448 for (; ops != NULL; ops = ops->beneath)
4449 if (ops->to_decr_pc_after_break != NULL)
4450 return ops->to_decr_pc_after_break (ops, gdbarch);
4451
4452 return gdbarch_decr_pc_after_break (gdbarch);
4453 }
4454
4455 /* See target.h. */
4456
4457 CORE_ADDR
4458 target_decr_pc_after_break (struct gdbarch *gdbarch)
4459 {
4460 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4461 }
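
/* For example, on x86 the software breakpoint is a one-byte 0xcc
   instruction and gdbarch_decr_pc_after_break is 1, so after a
   breakpoint SIGTRAP the reported PC is one past the breakpoint
   address.  A sketch of the usual adjustment made by a caller:

     CORE_ADDR bp_addr = stop_pc - target_decr_pc_after_break (gdbarch);

   A target layer can override to_decr_pc_after_break (returning 0,
   say) when no adjustment is needed.  */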
4462
4463 static int
4464 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4465 int write, struct mem_attrib *attrib,
4466 struct target_ops *target)
4467 {
4468 int retval;
4469
4470 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4471 attrib, target);
4472
4473 fprintf_unfiltered (gdb_stdlog,
4474 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4475 paddress (target_gdbarch (), memaddr), len,
4476 write ? "write" : "read", retval);
4477
4478 if (retval > 0)
4479 {
4480 int i;
4481
4482 fputs_unfiltered (", bytes =", gdb_stdlog);
4483 for (i = 0; i < retval; i++)
4484 {
4485 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4486 {
4487 if (targetdebug < 2 && i > 0)
4488 {
4489 fprintf_unfiltered (gdb_stdlog, " ...");
4490 break;
4491 }
4492 fprintf_unfiltered (gdb_stdlog, "\n");
4493 }
4494
4495 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4496 }
4497 }
4498
4499 fputc_unfiltered ('\n', gdb_stdlog);
4500
4501 return retval;
4502 }
4503
4504 static void
4505 debug_to_files_info (struct target_ops *target)
4506 {
4507 debug_target.to_files_info (target);
4508
4509 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4510 }
4511
4512 static int
4513 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4514 struct bp_target_info *bp_tgt)
4515 {
4516 int retval;
4517
4518 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4519
4520 fprintf_unfiltered (gdb_stdlog,
4521 "target_insert_breakpoint (%s, xxx) = %ld\n",
4522 core_addr_to_string (bp_tgt->placed_address),
4523 (unsigned long) retval);
4524 return retval;
4525 }
4526
4527 static int
4528 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4529 struct bp_target_info *bp_tgt)
4530 {
4531 int retval;
4532
4533 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4534
4535 fprintf_unfiltered (gdb_stdlog,
4536 "target_remove_breakpoint (%s, xxx) = %ld\n",
4537 core_addr_to_string (bp_tgt->placed_address),
4538 (unsigned long) retval);
4539 return retval;
4540 }
4541
4542 static int
4543 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4544 int type, int cnt, int from_tty)
4545 {
4546 int retval;
4547
4548 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4549 type, cnt, from_tty);
4550
4551 fprintf_unfiltered (gdb_stdlog,
4552 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4553 (unsigned long) type,
4554 (unsigned long) cnt,
4555 (unsigned long) from_tty,
4556 (unsigned long) retval);
4557 return retval;
4558 }
4559
4560 static int
4561 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4562 CORE_ADDR addr, int len)
4563 {
4564 CORE_ADDR retval;
4565
4566 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4567 addr, len);
4568
4569 fprintf_unfiltered (gdb_stdlog,
4570 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4571 core_addr_to_string (addr), (unsigned long) len,
4572 core_addr_to_string (retval));
4573 return retval;
4574 }
4575
4576 static int
4577 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4578 CORE_ADDR addr, int len, int rw,
4579 struct expression *cond)
4580 {
4581 int retval;
4582
4583 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4584 addr, len,
4585 rw, cond);
4586
4587 fprintf_unfiltered (gdb_stdlog,
4588 "target_can_accel_watchpoint_condition "
4589 "(%s, %d, %d, %s) = %ld\n",
4590 core_addr_to_string (addr), len, rw,
4591 host_address_to_string (cond), (unsigned long) retval);
4592 return retval;
4593 }
4594
4595 static int
4596 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4597 {
4598 int retval;
4599
4600 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4601
4602 fprintf_unfiltered (gdb_stdlog,
4603 "target_stopped_by_watchpoint () = %ld\n",
4604 (unsigned long) retval);
4605 return retval;
4606 }
4607
4608 static int
4609 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4610 {
4611 int retval;
4612
4613 retval = debug_target.to_stopped_data_address (target, addr);
4614
4615 fprintf_unfiltered (gdb_stdlog,
4616 "target_stopped_data_address ([%s]) = %ld\n",
4617 core_addr_to_string (*addr),
4618 (unsigned long)retval);
4619 return retval;
4620 }
4621
4622 static int
4623 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4624 CORE_ADDR addr,
4625 CORE_ADDR start, int length)
4626 {
4627 int retval;
4628
4629 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4630 start, length);
4631
4632 fprintf_unfiltered (gdb_stdlog,
4633 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4634 core_addr_to_string (addr), core_addr_to_string (start),
4635 length, retval);
4636 return retval;
4637 }
4638
4639 static int
4640 debug_to_insert_hw_breakpoint (struct target_ops *self,
4641 struct gdbarch *gdbarch,
4642 struct bp_target_info *bp_tgt)
4643 {
4644 int retval;
4645
4646 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4647 gdbarch, bp_tgt);
4648
4649 fprintf_unfiltered (gdb_stdlog,
4650 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4651 core_addr_to_string (bp_tgt->placed_address),
4652 (unsigned long) retval);
4653 return retval;
4654 }
4655
4656 static int
4657 debug_to_remove_hw_breakpoint (struct target_ops *self,
4658 struct gdbarch *gdbarch,
4659 struct bp_target_info *bp_tgt)
4660 {
4661 int retval;
4662
4663 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4664 gdbarch, bp_tgt);
4665
4666 fprintf_unfiltered (gdb_stdlog,
4667 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4668 core_addr_to_string (bp_tgt->placed_address),
4669 (unsigned long) retval);
4670 return retval;
4671 }
4672
4673 static int
4674 debug_to_insert_watchpoint (struct target_ops *self,
4675 CORE_ADDR addr, int len, int type,
4676 struct expression *cond)
4677 {
4678 int retval;
4679
4680 retval = debug_target.to_insert_watchpoint (&debug_target,
4681 addr, len, type, cond);
4682
4683 fprintf_unfiltered (gdb_stdlog,
4684 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4685 core_addr_to_string (addr), len, type,
4686 host_address_to_string (cond), (unsigned long) retval);
4687 return retval;
4688 }
4689
4690 static int
4691 debug_to_remove_watchpoint (struct target_ops *self,
4692 CORE_ADDR addr, int len, int type,
4693 struct expression *cond)
4694 {
4695 int retval;
4696
4697 retval = debug_target.to_remove_watchpoint (&debug_target,
4698 addr, len, type, cond);
4699
4700 fprintf_unfiltered (gdb_stdlog,
4701 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4702 core_addr_to_string (addr), len, type,
4703 host_address_to_string (cond), (unsigned long) retval);
4704 return retval;
4705 }
4706
4707 static void
4708 debug_to_terminal_init (struct target_ops *self)
4709 {
4710 debug_target.to_terminal_init (&debug_target);
4711
4712 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4713 }
4714
4715 static void
4716 debug_to_terminal_inferior (struct target_ops *self)
4717 {
4718 debug_target.to_terminal_inferior (&debug_target);
4719
4720 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4721 }
4722
4723 static void
4724 debug_to_terminal_ours_for_output (struct target_ops *self)
4725 {
4726 debug_target.to_terminal_ours_for_output (&debug_target);
4727
4728 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4729 }
4730
4731 static void
4732 debug_to_terminal_ours (struct target_ops *self)
4733 {
4734 debug_target.to_terminal_ours (&debug_target);
4735
4736 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4737 }
4738
4739 static void
4740 debug_to_terminal_save_ours (struct target_ops *self)
4741 {
4742 debug_target.to_terminal_save_ours (&debug_target);
4743
4744 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4745 }
4746
4747 static void
4748 debug_to_terminal_info (struct target_ops *self,
4749 const char *arg, int from_tty)
4750 {
4751 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4752
4753 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4754 from_tty);
4755 }
4756
4757 static void
4758 debug_to_load (struct target_ops *self, char *args, int from_tty)
4759 {
4760 debug_target.to_load (&debug_target, args, from_tty);
4761
4762 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4763 }
4764
4765 static void
4766 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4767 {
4768 debug_target.to_post_startup_inferior (&debug_target, ptid);
4769
4770 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4771 ptid_get_pid (ptid));
4772 }
4773
4774 static int
4775 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4776 {
4777 int retval;
4778
4779 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4780
4781 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4782 pid, retval);
4783
4784 return retval;
4785 }
4786
4787 static int
4788 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4789 {
4790 int retval;
4791
4792 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4793
4794 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4795 pid, retval);
4796
4797 return retval;
4798 }
4799
4800 static int
4801 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4802 {
4803 int retval;
4804
4805 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4806
4807 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4808 pid, retval);
4809
4810 return retval;
4811 }
4812
4813 static int
4814 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4815 {
4816 int retval;
4817
4818 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4819
4820 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4821 pid, retval);
4822
4823 return retval;
4824 }
4825
4826 static int
4827 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4828 {
4829 int retval;
4830
4831 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4834 pid, retval);
4835
4836 return retval;
4837 }
4838
4839 static int
4840 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4847 pid, retval);
4848
4849 return retval;
4850 }
4851
4852 static int
4853 debug_to_has_exited (struct target_ops *self,
4854 int pid, int wait_status, int *exit_status)
4855 {
4856 int has_exited;
4857
4858 has_exited = debug_target.to_has_exited (&debug_target,
4859 pid, wait_status, exit_status);
4860
4861 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4862 pid, wait_status, *exit_status, has_exited);
4863
4864 return has_exited;
4865 }
4866
4867 static int
4868 debug_to_can_run (struct target_ops *self)
4869 {
4870 int retval;
4871
4872 retval = debug_target.to_can_run (&debug_target);
4873
4874 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4875
4876 return retval;
4877 }
4878
4879 static struct gdbarch *
4880 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4881 {
4882 struct gdbarch *retval;
4883
4884 retval = debug_target.to_thread_architecture (ops, ptid);
4885
4886 fprintf_unfiltered (gdb_stdlog,
4887 "target_thread_architecture (%s) = %s [%s]\n",
4888 target_pid_to_str (ptid),
4889 host_address_to_string (retval),
4890 gdbarch_bfd_arch_info (retval)->printable_name);
4891 return retval;
4892 }
4893
4894 static void
4895 debug_to_stop (struct target_ops *self, ptid_t ptid)
4896 {
4897 debug_target.to_stop (&debug_target, ptid);
4898
4899 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4900 target_pid_to_str (ptid));
4901 }
4902
4903 static void
4904 debug_to_rcmd (struct target_ops *self, char *command,
4905 struct ui_file *outbuf)
4906 {
4907 debug_target.to_rcmd (&debug_target, command, outbuf);
4908 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4909 }
4910
4911 static char *
4912 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4913 {
4914 char *exec_file;
4915
4916 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4917
4918 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4919 pid, exec_file);
4920
4921 return exec_file;
4922 }
4923
4924 static void
4925 setup_target_debug (void)
4926 {
4927 memcpy (&debug_target, &current_target, sizeof debug_target);
4928
4929 current_target.to_open = debug_to_open;
4930 current_target.to_post_attach = debug_to_post_attach;
4931 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4932 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4933 current_target.to_files_info = debug_to_files_info;
4934 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4935 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4936 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4937 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4938 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4939 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4940 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4941 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4942 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4943 current_target.to_watchpoint_addr_within_range
4944 = debug_to_watchpoint_addr_within_range;
4945 current_target.to_region_ok_for_hw_watchpoint
4946 = debug_to_region_ok_for_hw_watchpoint;
4947 current_target.to_can_accel_watchpoint_condition
4948 = debug_to_can_accel_watchpoint_condition;
4949 current_target.to_terminal_init = debug_to_terminal_init;
4950 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4951 current_target.to_terminal_ours_for_output
4952 = debug_to_terminal_ours_for_output;
4953 current_target.to_terminal_ours = debug_to_terminal_ours;
4954 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4955 current_target.to_terminal_info = debug_to_terminal_info;
4956 current_target.to_load = debug_to_load;
4957 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4958 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4959 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4960 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4961 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4962 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4963 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4964 current_target.to_has_exited = debug_to_has_exited;
4965 current_target.to_can_run = debug_to_can_run;
4966 current_target.to_stop = debug_to_stop;
4967 current_target.to_rcmd = debug_to_rcmd;
4968 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4969 current_target.to_thread_architecture = debug_to_thread_architecture;
4970 }
4971 \f
4972
4973 static char targ_desc[] =
4974 "Names of targets and files being debugged.\nShows the entire \
4975 stack of targets currently in use (including the exec-file,\n\
4976 core-file, and process, if any), as well as the symbol file name.";
4977
4978 static void
4979 default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
4980 {
4981 error (_("\"monitor\" command not supported by this target."));
4982 }
4983
4984 static void
4985 do_monitor_command (char *cmd,
4986 int from_tty)
4987 {
4988 target_rcmd (cmd, gdb_stdtarg);
4989 }
4990
4991 /* Print the name of each layer of our target stack. */
4992
4993 static void
4994 maintenance_print_target_stack (char *cmd, int from_tty)
4995 {
4996 struct target_ops *t;
4997
4998 printf_filtered (_("The current target stack is:\n"));
4999
5000 for (t = target_stack; t != NULL; t = t->beneath)
5001 {
5002 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5003 }
5004 }
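
/* Typical output (the target names below are only an illustration; the
   exact set depends on the configuration and on what has been pushed):

     (gdb) maint print target-stack
     The current target stack is:
       - native (Native process)
       - exec (Local exec file)
       - None (None)
*/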
5005
5006 /* Controls if async mode is permitted. */
5007 int target_async_permitted = 0;
5008
5009 /* The set command writes to this variable. If the inferior is
5010 executing, target_async_permitted is *not* updated. */
5011 static int target_async_permitted_1 = 0;
5012
5013 static void
5014 set_target_async_command (char *args, int from_tty,
5015 struct cmd_list_element *c)
5016 {
5017 if (have_live_inferiors ())
5018 {
5019 target_async_permitted_1 = target_async_permitted;
5020 error (_("Cannot change this setting while the inferior is running."));
5021 }
5022
5023 target_async_permitted = target_async_permitted_1;
5024 }
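
/* From the user's point of view (a hypothetical session):

     (gdb) set target-async on
     (gdb) run &

   Changing the setting while the inferior is live is refused, and the
   staging variable target_async_permitted_1 is rolled back so that
   "show target-async" keeps reporting the value actually in effect.  */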
5025
5026 static void
5027 show_target_async_command (struct ui_file *file, int from_tty,
5028 struct cmd_list_element *c,
5029 const char *value)
5030 {
5031 fprintf_filtered (file,
5032 _("Controlling the inferior in "
5033 "asynchronous mode is %s.\n"), value);
5034 }
5035
5036 /* Temporary copies of permission settings. */
5037
5038 static int may_write_registers_1 = 1;
5039 static int may_write_memory_1 = 1;
5040 static int may_insert_breakpoints_1 = 1;
5041 static int may_insert_tracepoints_1 = 1;
5042 static int may_insert_fast_tracepoints_1 = 1;
5043 static int may_stop_1 = 1;
5044
5045 /* Make the user-set values match the real values again. */
5046
5047 void
5048 update_target_permissions (void)
5049 {
5050 may_write_registers_1 = may_write_registers;
5051 may_write_memory_1 = may_write_memory;
5052 may_insert_breakpoints_1 = may_insert_breakpoints;
5053 may_insert_tracepoints_1 = may_insert_tracepoints;
5054 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5055 may_stop_1 = may_stop;
5056 }
5057
5058 /* This single function handles most of the permission flags in the
5059 same way. */
5060
5061 static void
5062 set_target_permissions (char *args, int from_tty,
5063 struct cmd_list_element *c)
5064 {
5065 if (target_has_execution)
5066 {
5067 update_target_permissions ();
5068 error (_("Cannot change this setting while the inferior is running."));
5069 }
5070
5071 /* Make the real values match the user-changed values. */
5072 may_write_registers = may_write_registers_1;
5073 may_insert_breakpoints = may_insert_breakpoints_1;
5074 may_insert_tracepoints = may_insert_tracepoints_1;
5075 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5076 may_stop = may_stop_1;
5077 update_observer_mode ();
5078 }
5079
5080 /* Set memory write permission independently of observer mode. */
5081
5082 static void
5083 set_write_memory_permission (char *args, int from_tty,
5084 struct cmd_list_element *c)
5085 {
5086 /* Make the real values match the user-changed values. */
5087 may_write_memory = may_write_memory_1;
5088 update_observer_mode ();
5089 }
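
/* A hypothetical session showing these permission switches in action;
   the error text is indicative only and comes from the memory-writing
   paths, which consult may_write_memory:

     (gdb) set may-write-memory off
     (gdb) print global_flag = 1
     Writing to memory is not allowed (addr 0x601040, len 4)

   "set may-write-memory on" restores the default behavior.  */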
5090
5091
5092 void
5093 initialize_targets (void)
5094 {
5095 init_dummy_target ();
5096 push_target (&dummy_target);
5097
5098 add_info ("target", target_info, targ_desc);
5099 add_info ("files", target_info, targ_desc);
5100
5101 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5102 Set target debugging."), _("\
5103 Show target debugging."), _("\
5104 When non-zero, target debugging is enabled. Higher numbers are more\n\
5105 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5106 command."),
5107 NULL,
5108 show_targetdebug,
5109 &setdebuglist, &showdebuglist);
5110
5111 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5112 &trust_readonly, _("\
5113 Set mode for reading from readonly sections."), _("\
5114 Show mode for reading from readonly sections."), _("\
5115 When this mode is on, memory reads from readonly sections (such as .text)\n\
5116 will be read from the object file instead of from the target. This will\n\
5117 result in significant performance improvement for remote targets."),
5118 NULL,
5119 show_trust_readonly,
5120 &setlist, &showlist);
5121
5122 add_com ("monitor", class_obscure, do_monitor_command,
5123 _("Send a command to the remote monitor (remote targets only)."));
5124
5125 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5126 _("Print the name of each layer of the internal target stack."),
5127 &maintenanceprintlist);
5128
5129 add_setshow_boolean_cmd ("target-async", no_class,
5130 &target_async_permitted_1, _("\
5131 Set whether gdb controls the inferior in asynchronous mode."), _("\
5132 Show whether gdb controls the inferior in asynchronous mode."), _("\
5133 Tells gdb whether to control the inferior in asynchronous mode."),
5134 set_target_async_command,
5135 show_target_async_command,
5136 &setlist,
5137 &showlist);
5138
5139 add_setshow_boolean_cmd ("may-write-registers", class_support,
5140 &may_write_registers_1, _("\
5141 Set permission to write into registers."), _("\
5142 Show permission to write into registers."), _("\
5143 When this permission is on, GDB may write into the target's registers.\n\
5144 Otherwise, any sort of write attempt will result in an error."),
5145 set_target_permissions, NULL,
5146 &setlist, &showlist);
5147
5148 add_setshow_boolean_cmd ("may-write-memory", class_support,
5149 &may_write_memory_1, _("\
5150 Set permission to write into target memory."), _("\
5151 Show permission to write into target memory."), _("\
5152 When this permission is on, GDB may write into the target's memory.\n\
5153 Otherwise, any sort of write attempt will result in an error."),
5154 set_write_memory_permission, NULL,
5155 &setlist, &showlist);
5156
5157 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5158 &may_insert_breakpoints_1, _("\
5159 Set permission to insert breakpoints in the target."), _("\
5160 Show permission to insert breakpoints in the target."), _("\
5161 When this permission is on, GDB may insert breakpoints in the program.\n\
5162 Otherwise, any sort of insertion attempt will result in an error."),
5163 set_target_permissions, NULL,
5164 &setlist, &showlist);
5165
5166 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5167 &may_insert_tracepoints_1, _("\
5168 Set permission to insert tracepoints in the target."), _("\
5169 Show permission to insert tracepoints in the target."), _("\
5170 When this permission is on, GDB may insert tracepoints in the program.\n\
5171 Otherwise, any sort of insertion attempt will result in an error."),
5172 set_target_permissions, NULL,
5173 &setlist, &showlist);
5174
5175 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5176 &may_insert_fast_tracepoints_1, _("\
5177 Set permission to insert fast tracepoints in the target."), _("\
5178 Show permission to insert fast tracepoints in the target."), _("\
5179 When this permission is on, GDB may insert fast tracepoints.\n\
5180 Otherwise, any sort of insertion attempt will result in an error."),
5181 set_target_permissions, NULL,
5182 &setlist, &showlist);
5183
5184 add_setshow_boolean_cmd ("may-interrupt", class_support,
5185 &may_stop_1, _("\
5186 Set permission to interrupt or signal the target."), _("\
5187 Show permission to interrupt or signal the target."), _("\
5188 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5189 Otherwise, any attempt to interrupt or stop will be ignored."),
5190 set_target_permissions, NULL,
5191 &setlist, &showlist);
5192 }