convert to_auxv_parse
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static void tcomplain (void) ATTRIBUTE_NORETURN;
70
71 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
72
73 static int return_zero (void);
74
75 void target_ignore (void);
76
77 static void target_command (char *, int);
78
79 static struct target_ops *find_default_run_target (char *);
80
81 static target_xfer_partial_ftype default_xfer_partial;
82
83 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
84 ptid_t ptid);
85
86 static int dummy_find_memory_regions (struct target_ops *self,
87 find_memory_region_ftype ignore1,
88 void *ignore2);
89
90 static char *dummy_make_corefile_notes (struct target_ops *self,
91 bfd *ignore1, int *ignore2);
92
93 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
94
95 static int find_default_can_async_p (struct target_ops *ignore);
96
97 static int find_default_is_async_p (struct target_ops *ignore);
98
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops *self);
101
102 #include "target-delegates.c"
103
104 static void init_dummy_target (void);
105
106 static struct target_ops debug_target;
107
108 static void debug_to_open (char *, int);
109
110 static void debug_to_prepare_to_store (struct target_ops *self,
111 struct regcache *);
112
113 static void debug_to_files_info (struct target_ops *);
114
115 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
116 struct bp_target_info *);
117
118 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
119 struct bp_target_info *);
120
121 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
122 int, int, int);
123
124 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
125 struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
129 struct gdbarch *,
130 struct bp_target_info *);
131
132 static int debug_to_insert_watchpoint (struct target_ops *self,
133 CORE_ADDR, int, int,
134 struct expression *);
135
136 static int debug_to_remove_watchpoint (struct target_ops *self,
137 CORE_ADDR, int, int,
138 struct expression *);
139
140 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
141
142 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
143 CORE_ADDR, CORE_ADDR, int);
144
145 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
146 CORE_ADDR, int);
147
148 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
149 CORE_ADDR, int, int,
150 struct expression *);
151
152 static void debug_to_terminal_init (struct target_ops *self);
153
154 static void debug_to_terminal_inferior (struct target_ops *self);
155
156 static void debug_to_terminal_ours_for_output (struct target_ops *self);
157
158 static void debug_to_terminal_save_ours (struct target_ops *self);
159
160 static void debug_to_terminal_ours (struct target_ops *self);
161
162 static void debug_to_load (struct target_ops *self, char *, int);
163
164 static int debug_to_can_run (struct target_ops *self);
165
166 static void debug_to_stop (struct target_ops *self, ptid_t);
167
168 /* Pointer to array of target architecture structures; the size of the
169 array; the current index into the array; the allocated size of the
170 array. */
171 struct target_ops **target_structs;
172 unsigned target_struct_size;
173 unsigned target_struct_allocsize;
174 #define DEFAULT_ALLOCSIZE 10
175
176 /* The initial current target, so that there is always a semi-valid
177 current target. */
178
179 static struct target_ops dummy_target;
180
181 /* Top of target stack. */
182
183 static struct target_ops *target_stack;
184
185 /* The target structure we are currently using to talk to a process
186 or file or whatever "inferior" we have. */
187
188 struct target_ops current_target;
189
190 /* Command list for target. */
191
192 static struct cmd_list_element *targetlist = NULL;
193
194 /* Nonzero if we should trust readonly sections from the
195 executable when reading memory. */
196
197 static int trust_readonly = 0;
198
199 /* Nonzero if we should show true memory content including
200 memory breakpoint inserted by gdb. */
201
202 static int show_memory_breakpoints = 0;
203
204 /* These globals control whether GDB attempts to perform these
205 operations; they are useful for targets that need to prevent
206 inadvertant disruption, such as in non-stop mode. */
207
208 int may_write_registers = 1;
209
210 int may_write_memory = 1;
211
212 int may_insert_breakpoints = 1;
213
214 int may_insert_tracepoints = 1;
215
216 int may_insert_fast_tracepoints = 1;
217
218 int may_stop = 1;
219
220 /* Non-zero if we want to see trace of target level stuff. */
221
222 static unsigned int targetdebug = 0;
/* Implementation of "show debug target": print the flag's current
   value, already rendered as a string in VALUE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
229
230 static void setup_target_debug (void);
231
232 /* The user just typed 'target' without the name of a target. */
233
234 static void
235 target_command (char *arg, int from_tty)
236 {
237 fputs_filtered ("Argument required (target name). Try `help target'\n",
238 gdb_stdout);
239 }
240
241 /* Default target_has_* methods for process_stratum targets. */
242
243 int
244 default_child_has_all_memory (struct target_ops *ops)
245 {
246 /* If no inferior selected, then we can't read memory here. */
247 if (ptid_equal (inferior_ptid, null_ptid))
248 return 0;
249
250 return 1;
251 }
252
253 int
254 default_child_has_memory (struct target_ops *ops)
255 {
256 /* If no inferior selected, then we can't read memory here. */
257 if (ptid_equal (inferior_ptid, null_ptid))
258 return 0;
259
260 return 1;
261 }
262
263 int
264 default_child_has_stack (struct target_ops *ops)
265 {
266 /* If no inferior selected, there's no stack. */
267 if (ptid_equal (inferior_ptid, null_ptid))
268 return 0;
269
270 return 1;
271 }
272
273 int
274 default_child_has_registers (struct target_ops *ops)
275 {
276 /* Can't read registers from no inferior. */
277 if (ptid_equal (inferior_ptid, null_ptid))
278 return 0;
279
280 return 1;
281 }
282
283 int
284 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
285 {
286 /* If there's no thread selected, then we can't make it run through
287 hoops. */
288 if (ptid_equal (the_ptid, null_ptid))
289 return 0;
290
291 return 1;
292 }
293
294
295 int
296 target_has_all_memory_1 (void)
297 {
298 struct target_ops *t;
299
300 for (t = current_target.beneath; t != NULL; t = t->beneath)
301 if (t->to_has_all_memory (t))
302 return 1;
303
304 return 0;
305 }
306
307 int
308 target_has_memory_1 (void)
309 {
310 struct target_ops *t;
311
312 for (t = current_target.beneath; t != NULL; t = t->beneath)
313 if (t->to_has_memory (t))
314 return 1;
315
316 return 0;
317 }
318
319 int
320 target_has_stack_1 (void)
321 {
322 struct target_ops *t;
323
324 for (t = current_target.beneath; t != NULL; t = t->beneath)
325 if (t->to_has_stack (t))
326 return 1;
327
328 return 0;
329 }
330
331 int
332 target_has_registers_1 (void)
333 {
334 struct target_ops *t;
335
336 for (t = current_target.beneath; t != NULL; t = t->beneath)
337 if (t->to_has_registers (t))
338 return 1;
339
340 return 0;
341 }
342
343 int
344 target_has_execution_1 (ptid_t the_ptid)
345 {
346 struct target_ops *t;
347
348 for (t = current_target.beneath; t != NULL; t = t->beneath)
349 if (t->to_has_execution (t, the_ptid))
350 return 1;
351
352 return 0;
353 }
354
355 int
356 target_has_execution_current (void)
357 {
358 return target_has_execution_1 (inferior_ptid);
359 }
360
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  Must be called
   before T is pushed or registered.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): the casts below arrange for return_zero to be
     called through prototypes that differ from its declaration
     (which takes no arguments) — a long-standing idiom here, but
     strictly undefined behavior in ISO C.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill in any remaining NULL methods; presumably with delegators
     that forward to the target beneath — see target-delegates.c.  */
  install_delegators (t);
}
388
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all "must have" methods filled in before it can
     ever be used.  */
  complete_target_initialization (t);

  /* Lazily create the global registry of known targets, growing it by
     doubling when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command
     itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
429
430 /* Add a possible target architecture to the list. */
431
/* Register target T with no command-line completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
437
438 /* See target.h. */
439
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* ALT ("target SHORTNAME") is handed to deprecate_cmd; NOTE(review):
     presumably deprecate_cmd retains the string for the lifetime of
     the command — confirm before ever freeing it here.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
452
453 /* Stub functions */
454
/* Stub method that deliberately does nothing; used as a default for
   optional target hooks.  */

void
target_ignore (void)
{
}
459
460 void
461 target_kill (void)
462 {
463 if (targetdebug)
464 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
465
466 current_target.to_kill (&current_target);
467 }
468
469 void
470 target_load (char *arg, int from_tty)
471 {
472 target_dcache_invalidate ();
473 (*current_target.to_load) (&current_target, arg, from_tty);
474 }
475
476 void
477 target_create_inferior (char *exec_file, char *args,
478 char **env, int from_tty)
479 {
480 struct target_ops *t;
481
482 for (t = current_target.beneath; t != NULL; t = t->beneath)
483 {
484 if (t->to_create_inferior != NULL)
485 {
486 t->to_create_inferior (t, exec_file, args, env, from_tty);
487 if (targetdebug)
488 fprintf_unfiltered (gdb_stdlog,
489 "target_create_inferior (%s, %s, xxx, %d)\n",
490 exec_file, args, from_tty);
491 return;
492 }
493 }
494
495 internal_error (__FILE__, __LINE__,
496 _("could not find a target to create inferior"));
497 }
498
499 void
500 target_terminal_inferior (void)
501 {
502 /* A background resume (``run&'') should leave GDB in control of the
503 terminal. Use target_can_async_p, not target_is_async_p, since at
504 this point the target is not async yet. However, if sync_execution
505 is not set, we know it will become async prior to resume. */
506 if (target_can_async_p () && !sync_execution)
507 return;
508
509 /* If GDB is resuming the inferior in the foreground, install
510 inferior's terminal modes. */
511 (*current_target.to_terminal_inferior) (&current_target);
512 }
513
514 static int
515 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
516 struct target_ops *t)
517 {
518 errno = EIO; /* Can't read/write this location. */
519 return 0; /* No bytes handled. */
520 }
521
522 static void
523 tcomplain (void)
524 {
525 error (_("You can't do that when your target is `%s'"),
526 current_target.to_shortname);
527 }
528
/* Raise an error for operations that require a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
534
/* Default to_terminal_info method: report that nothing was saved.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
540
541 /* A default implementation for the to_get_ada_task_ptid target method.
542
543 This function builds the PTID by using both LWP and TID as part of
544 the PTID lwp and tid elements. The pid used is the pid of the
545 inferior_ptid. */
546
547 static ptid_t
548 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
549 {
550 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
551 }
552
553 static enum exec_direction_kind
554 default_execution_direction (struct target_ops *self)
555 {
556 if (!target_can_execute_reverse)
557 return EXEC_FORWARD;
558 else if (!target_can_async_p ())
559 return EXEC_FORWARD;
560 else
561 gdb_assert_not_reached ("\
562 to_execution_direction must be implemented for reverse async");
563 }
564
565 /* Go through the target stack from top to bottom, copying over zero
566 entries in current_target, then filling in still empty entries. In
567 effect, we are doing class inheritance through the pushed target
568 vectors.
569
570 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
571 is currently implemented, is that it discards any knowledge of
572 which target an inherited method originally belonged to.
573 Consequently, new new target methods should instead explicitly and
574 locally search the target stack for the target that can handle the
575 request. */
576
/* Rebuild current_target by merging the whole target stack, topmost
   target winning for each field; see the NOTE above about why this
   inheritance scheme is problematic.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD out of TARGET unless a target higher on the stack has
     already supplied a value for it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Only the fields explicitly INHERITed below are merged this way;
     every "Do not inherit" field is either delegated (see
     install_delegators above) or defaulted further down.  */
  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do no inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      /* Do not inherit to_stop.  */
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      /* Do not inherit to_set_circular_trace_buffer.  */
      /* Do not inherit to_set_trace_buffer_size.  */
      /* Do not inherit to_set_trace_notes.  */
      /* Do not inherit to_get_tib_address.  */
      /* Do not inherit to_set_permissions.  */
      /* Do not inherit to_static_tracepoint_marker_at.  */
      /* Do not inherit to_static_tracepoint_markers_by_strid.  */
      /* Do not inherit to_traceframe_info.  */
      /* Do not inherit to_use_agent.  */
      /* Do not inherit to_can_use_agent.  */
      /* Do not inherit to_augmented_libraries_svr4_read.  */
      INHERIT (to_magic, t);
      /* Do not inherit
	 to_supports_evaluation_of_breakpoint_conditions.  */
      /* Do not inherit to_can_run_breakpoint_commands.  */
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
    if (!current_target.field)               \
      current_target.field = value

  /* NOTE(review): as with complete_target_initialization, the casts
     below call tcomplain/target_ignore/nomemory/return_zero through
     mismatched prototypes — the historical idiom here.  */
  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  /* to_read_description is looked up on the stack at call time, so it
     must stay NULL here rather than being inherited or defaulted.  */
  current_target.to_read_description = NULL;

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
759
760 /* Push a new target type into the stack of the existing target accessors,
761 possibly superseding some of the existing accessors.
762
763 Rather than allow an empty stack, we always have the dummy target at
764 the bottom stratum, so we can call the function vectors without
765 checking them. */
766
void
push_target (struct target_ops *t)
{
  /* CUR walks the chain as a pointer-to-link so the target can be
     spliced in without special-casing the head of the list.  */
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the first link
     whose target is at or below T's stratum.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  Unhook before closing so the
	 close implementation never sees a half-linked stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the updated stack.  */
  update_current_target ();
}
810
811 /* Remove a target_ops vector from the stack, wherever it may be.
812 Return how many times it was removed (0 or 1). */
813
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack; unpushing
     it would leave the stack empty.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  /* Rebuild current_target before closing, so target method calls no
     longer reach T.  */
  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
852
/* Unpush targets, topmost first, until only strata at or below
   ABOVE_STRATUM remain.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target only fails when the target is not on the
	     stack, which here would mean target_stack and
	     current_target disagree.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
869
870 void
871 pop_all_targets (void)
872 {
873 pop_all_targets_above (dummy_stratum);
874 }
875
876 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
877
878 int
879 target_is_pushed (struct target_ops *t)
880 {
881 struct target_ops **cur;
882
883 /* Check magic number. If wrong, it probably means someone changed
884 the struct definition, but not all the places that initialize one. */
885 if (t->to_magic != OPS_MAGIC)
886 {
887 fprintf_unfiltered (gdb_stderr,
888 "Magic number of %s target struct wrong\n",
889 t->to_shortname);
890 internal_error (__FILE__, __LINE__,
891 _("failed internal consistency check"));
892 }
893
894 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
895 if (*cur == t)
896 return 1;
897
898 return 0;
899 }
900
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Errors
   out (with a TLS-specific message where possible) on failure.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile so its value set inside TRY_CATCH is reliable
     afterwards; NOTE(review): presumably TRY_CATCH unwinds via
     setjmp/longjmp per exceptions.h — confirm.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
997
998 const char *
999 target_xfer_status_to_string (enum target_xfer_status err)
1000 {
1001 #define CASE(X) case X: return #X
1002 switch (err)
1003 {
1004 CASE(TARGET_XFER_E_IO);
1005 CASE(TARGET_XFER_E_UNAVAILABLE);
1006 default:
1007 return "<unknown>";
1008 }
1009 #undef CASE
1010 };
1011
1012
1013 #undef MIN
1014 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1015
1016 /* target_read_string -- read a null terminated string, up to LEN bytes,
1017 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1018 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1019 is responsible for freeing it. Return the number of bytes successfully
1020 read. */
1021
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read a 4-byte-aligned chunk; TLEN is how many of its bytes
	 fall inside the requested range, OFFSET where they start.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Double the result buffer whenever the next chunk would not
	 fit; BUFPTR must be recomputed since xrealloc may move it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy the chunk's bytes into the result, stopping at (and
	 including) a terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* The (possibly partial) buffer is always handed back, even on
     error; the caller owns it and must free it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1088
1089 struct target_section_table *
1090 target_get_section_table (struct target_ops *target)
1091 {
1092 if (targetdebug)
1093 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1094
1095 return (*target->to_get_section_table) (target);
1096 }
1097
1098 /* Find a section containing ADDR. */
1099
1100 struct target_section *
1101 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1102 {
1103 struct target_section_table *table = target_get_section_table (target);
1104 struct target_section *secp;
1105
1106 if (table == NULL)
1107 return NULL;
1108
1109 for (secp = table->sections; secp < table->sections_end; secp++)
1110 {
1111 if (addr >= secp->addr && addr < secp->endaddr)
1112 return secp;
1113 }
1114 return NULL;
1115 }
1116
1117 /* Read memory from the live target, even if currently inspecting a
1118 traceframe. The return is the same as that of target_read. */
1119
1120 static enum target_xfer_status
1121 target_read_live_memory (enum target_object object,
1122 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1123 ULONGEST *xfered_len)
1124 {
1125 enum target_xfer_status ret;
1126 struct cleanup *cleanup;
1127
1128 /* Switch momentarily out of tfind mode so to access live memory.
1129 Note that this must not clear global state, such as the frame
1130 cache, which must still remain valid for the previous traceframe.
1131 We may be _building_ the frame cache at this point. */
1132 cleanup = make_cleanup_restore_traceframe_number ();
1133 set_traceframe_number (-1);
1134
1135 ret = target_xfer_partial (current_target.beneath, object, NULL,
1136 myaddr, NULL, memaddr, len, xfered_len);
1137
1138 do_cleanups (cleanup);
1139 return ret;
1140 }
1141
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only attempt the live read if MEMADDR falls within a section
     flagged SEC_READONLY; otherwise report EOF below.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Walk all sections looking for the one(s) covering the
	 request; a request spanning a section boundary is truncated
	 to the covering section (callers retry for the remainder).  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1197
1198 /* Read memory from more than one valid target. A core file, for
1199 instance, could have some of memory but delegate other bits to
1200 the target below it. So, we must manually try all targets. */
1201
1202 static enum target_xfer_status
1203 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1204 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1205 ULONGEST *xfered_len)
1206 {
1207 enum target_xfer_status res;
1208
1209 do
1210 {
1211 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1212 readbuf, writebuf, memaddr, len,
1213 xfered_len);
1214 if (res == TARGET_XFER_OK)
1215 break;
1216
1217 /* Stop if the target reports that the memory is not available. */
1218 if (res == TARGET_XFER_E_UNAVAILABLE)
1219 break;
1220
1221 /* We want to continue past core files to executables, but not
1222 past a running target's memory. */
1223 if (ops->to_has_all_memory (ops))
1224 break;
1225
1226 ops = ops->beneath;
1227 }
1228 while (ops != NULL);
1229
1230 return res;
1231 }
1232
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped-overlay file reads, trusted read-only
   executable sections, traceframe-aware live reads, the dcache, and
   finally a raw transfer through the target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): REG_LEN is an int while LEN is ULONGEST, so a very
     large request would truncate here -- confirm callers bound LEN.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The traceframe does not cover the start of the request:
	     either nothing is available, or availability starts past
	     MEMADDR.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  Clip the request to the memory
     region containing MEMADDR first.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Honor the region's access mode before transferring anything.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1443
1444 /* Perform a partial memory transfer. For docs see target.h,
1445 to_xfer_partial. */
1446
1447 static enum target_xfer_status
1448 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1449 gdb_byte *readbuf, const gdb_byte *writebuf,
1450 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1451 {
1452 enum target_xfer_status res;
1453
1454 /* Zero length requests are ok and require no work. */
1455 if (len == 0)
1456 return TARGET_XFER_EOF;
1457
1458 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1459 breakpoint insns, thus hiding out from higher layers whether
1460 there are software breakpoints inserted in the code stream. */
1461 if (readbuf != NULL)
1462 {
1463 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1464 xfered_len);
1465
1466 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1467 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1468 }
1469 else
1470 {
1471 void *buf;
1472 struct cleanup *old_chain;
1473
1474 /* A large write request is likely to be partially satisfied
1475 by memory_xfer_partial_1. We will continually malloc
1476 and free a copy of the entire write request for breakpoint
1477 shadow handling even though we only end up writing a small
1478 subset of it. Cap writes to 4KB to mitigate this. */
1479 len = min (4096, len);
1480
1481 buf = xmalloc (len);
1482 old_chain = make_cleanup (xfree, buf);
1483 memcpy (buf, writebuf, len);
1484
1485 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1486 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1487 xfered_len);
1488
1489 do_cleanups (old_chain);
1490 }
1491
1492 return res;
1493 }
1494
1495 static void
1496 restore_show_memory_breakpoints (void *arg)
1497 {
1498 show_memory_breakpoints = (uintptr_t) arg;
1499 }
1500
1501 struct cleanup *
1502 make_show_memory_breakpoints_cleanup (int show)
1503 {
1504 int current = show_memory_breakpoints;
1505
1506 show_memory_breakpoints = show;
1507 return make_cleanup (restore_show_memory_breakpoints,
1508 (void *) (uintptr_t) current);
1509 }
1510
/* For docs see target.h, to_xfer_partial.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory" knob before touching the
     target at all.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, breaking the line at every
	     16-byte-aligned buffer address; with debug verbosity < 2
	     only the first such line is printed.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1603
1604 /* Read LEN bytes of target memory at address MEMADDR, placing the
1605 results in GDB's memory at MYADDR. Returns either 0 for success or
1606 TARGET_XFER_E_IO if any error occurs.
1607
1608 If an error occurs, no guarantee is made about the contents of the data at
1609 MYADDR. In particular, the caller should not depend upon partial reads
1610 filling the buffer with good data. There is no way for the caller to know
1611 how much good data might have been transfered anyway. Callers that can
1612 deal with partial reads should call target_read (which will retry until
1613 it makes no progress, and then return how much was transferred). */
1614
1615 int
1616 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1617 {
1618 /* Dispatch to the topmost target, not the flattened current_target.
1619 Memory accesses check target->to_has_(all_)memory, and the
1620 flattened target doesn't inherit those. */
1621 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1622 myaddr, memaddr, len) == len)
1623 return 0;
1624 else
1625 return TARGET_XFER_E_IO;
1626 }
1627
1628 /* Like target_read_memory, but specify explicitly that this is a read
1629 from the target's raw memory. That is, this read bypasses the
1630 dcache, breakpoint shadowing, etc. */
1631
1632 int
1633 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1634 {
1635 /* See comment in target_read_memory about why the request starts at
1636 current_target.beneath. */
1637 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1638 myaddr, memaddr, len) == len)
1639 return 0;
1640 else
1641 return TARGET_XFER_E_IO;
1642 }
1643
1644 /* Like target_read_memory, but specify explicitly that this is a read from
1645 the target's stack. This may trigger different cache behavior. */
1646
1647 int
1648 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1649 {
1650 /* See comment in target_read_memory about why the request starts at
1651 current_target.beneath. */
1652 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1653 myaddr, memaddr, len) == len)
1654 return 0;
1655 else
1656 return TARGET_XFER_E_IO;
1657 }
1658
1659 /* Like target_read_memory, but specify explicitly that this is a read from
1660 the target's code. This may trigger different cache behavior. */
1661
1662 int
1663 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1664 {
1665 /* See comment in target_read_memory about why the request starts at
1666 current_target.beneath. */
1667 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1668 myaddr, memaddr, len) == len)
1669 return 0;
1670 else
1671 return TARGET_XFER_E_IO;
1672 }
1673
1674 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1675 Returns either 0 for success or TARGET_XFER_E_IO if any
1676 error occurs. If an error occurs, no guarantee is made about how
1677 much data got written. Callers that can deal with partial writes
1678 should call target_write. */
1679
1680 int
1681 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1682 {
1683 /* See comment in target_read_memory about why the request starts at
1684 current_target.beneath. */
1685 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1686 myaddr, memaddr, len) == len)
1687 return 0;
1688 else
1689 return TARGET_XFER_E_IO;
1690 }
1691
1692 /* Write LEN bytes from MYADDR to target raw memory at address
1693 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1694 if any error occurs. If an error occurs, no guarantee is made
1695 about how much data got written. Callers that can deal with
1696 partial writes should call target_write. */
1697
1698 int
1699 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1700 {
1701 /* See comment in target_read_memory about why the request starts at
1702 current_target.beneath. */
1703 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1704 myaddr, memaddr, len) == len)
1705 return 0;
1706 else
1707 return TARGET_XFER_E_IO;
1708 }
1709
1710 /* Fetch the target's memory map. */
1711
1712 VEC(mem_region_s) *
1713 target_memory_map (void)
1714 {
1715 VEC(mem_region_s) *result;
1716 struct mem_region *last_one, *this_one;
1717 int ix;
1718 struct target_ops *t;
1719
1720 if (targetdebug)
1721 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1722
1723 result = current_target.to_memory_map (&current_target);
1724 if (result == NULL)
1725 return NULL;
1726
1727 qsort (VEC_address (mem_region_s, result),
1728 VEC_length (mem_region_s, result),
1729 sizeof (struct mem_region), mem_region_cmp);
1730
1731 /* Check that regions do not overlap. Simultaneously assign
1732 a numbering for the "mem" commands to use to refer to
1733 each region. */
1734 last_one = NULL;
1735 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1736 {
1737 this_one->number = ix;
1738
1739 if (last_one && last_one->hi > this_one->lo)
1740 {
1741 warning (_("Overlapping regions in memory map: ignoring"));
1742 VEC_free (mem_region_s, result);
1743 return NULL;
1744 }
1745 last_one = this_one;
1746 }
1747
1748 return result;
1749 }
1750
/* Ask the target to erase LENGTH bytes of flash starting at ADDRESS,
   delegating to the to_flash_erase method; logs when target debugging
   is enabled.  */

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
			hex_string (address), phex (length, 0));
  current_target.to_flash_erase (&current_target, address, length);
}
1759
/* Tell the target that flash programming has finished, delegating to
   the to_flash_done method; logs when target debugging is enabled.  */

void
target_flash_done (void)
{
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
  current_target.to_flash_done (&current_target);
}
1767
/* "show trust-readonly-sections" callback: print the current setting
   VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1776
1777 /* More generic transfers. */
1778
1779 static enum target_xfer_status
1780 default_xfer_partial (struct target_ops *ops, enum target_object object,
1781 const char *annex, gdb_byte *readbuf,
1782 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1783 ULONGEST *xfered_len)
1784 {
1785 if (object == TARGET_OBJECT_MEMORY
1786 && ops->deprecated_xfer_memory != NULL)
1787 /* If available, fall back to the target's
1788 "deprecated_xfer_memory" method. */
1789 {
1790 int xfered = -1;
1791
1792 errno = 0;
1793 if (writebuf != NULL)
1794 {
1795 void *buffer = xmalloc (len);
1796 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1797
1798 memcpy (buffer, writebuf, len);
1799 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1800 1/*write*/, NULL, ops);
1801 do_cleanups (cleanup);
1802 }
1803 if (readbuf != NULL)
1804 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1805 0/*read*/, NULL, ops);
1806 if (xfered > 0)
1807 {
1808 *xfered_len = (ULONGEST) xfered;
1809 return TARGET_XFER_E_IO;
1810 }
1811 else if (xfered == 0 && errno == 0)
1812 /* "deprecated_xfer_memory" uses 0, cross checked against
1813 ERRNO as one indication of an error. */
1814 return TARGET_XFER_EOF;
1815 else
1816 return TARGET_XFER_E_IO;
1817 }
1818 else
1819 {
1820 gdb_assert (ops->beneath != NULL);
1821 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1822 readbuf, writebuf, offset, len,
1823 xfered_len);
1824 }
1825 }
1826
/* Target vector read/write partial wrapper functions.  */

/* Read-only convenience wrapper around target_xfer_partial: passes
   BUF as the read buffer and no write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1839
/* Write-only convenience wrapper around target_xfer_partial: passes
   BUF as the write buffer and no read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1849
1850 /* Wrappers to perform the full transfer. */
1851
1852 /* For docs on target_read see target.h. */
1853
1854 LONGEST
1855 target_read (struct target_ops *ops,
1856 enum target_object object,
1857 const char *annex, gdb_byte *buf,
1858 ULONGEST offset, LONGEST len)
1859 {
1860 LONGEST xfered = 0;
1861
1862 while (xfered < len)
1863 {
1864 ULONGEST xfered_len;
1865 enum target_xfer_status status;
1866
1867 status = target_read_partial (ops, object, annex,
1868 (gdb_byte *) buf + xfered,
1869 offset + xfered, len - xfered,
1870 &xfered_len);
1871
1872 /* Call an observer, notifying them of the xfer progress? */
1873 if (status == TARGET_XFER_EOF)
1874 return xfered;
1875 else if (status == TARGET_XFER_OK)
1876 {
1877 xfered += xfered_len;
1878 QUIT;
1879 }
1880 else
1881 return -1;
1882
1883 }
1884 return len;
1885 }
1886
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  FORWARD records which end
     was readable; the bisection below searches from that end.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half nearest the end already known to be
	 readable; "second half" is the other one.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into a
	 right-sized buffer so the result owns only what was read.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2021
2022 void
2023 free_memory_read_result_vector (void *x)
2024 {
2025 VEC(memory_read_result_s) *v = x;
2026 memory_read_result_s *current;
2027 int ix;
2028
2029 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2030 {
2031 xfree (current->data);
2032 }
2033 VEC_free (memory_read_result_s, v);
2034 }
2035
/* Read LEN bytes of OPS's memory starting at OFFSET, skipping regions
   marked unreadable and salvaging partial subranges where whole-chunk
   reads fail.  Returns a vector of the memory blocks actually read;
   the caller owns the vector and each block's data (see
   free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means the region has no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
      /* NOTE(review): the else branch ignores XFERED -- once XFERED > 0
	 this overstates the bytes left in the region by XFERED, so
	 TO_READ below can cross into the next region.  Looks like it
	 should be region->hi - (offset + xfered); confirm before
	 changing.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2094
2095
2096 /* An alternative to target_write with progress callbacks. */
2097
2098 LONGEST
2099 target_write_with_progress (struct target_ops *ops,
2100 enum target_object object,
2101 const char *annex, const gdb_byte *buf,
2102 ULONGEST offset, LONGEST len,
2103 void (*progress) (ULONGEST, void *), void *baton)
2104 {
2105 LONGEST xfered = 0;
2106
2107 /* Give the progress callback a chance to set up. */
2108 if (progress)
2109 (*progress) (0, baton);
2110
2111 while (xfered < len)
2112 {
2113 ULONGEST xfered_len;
2114 enum target_xfer_status status;
2115
2116 status = target_write_partial (ops, object, annex,
2117 (gdb_byte *) buf + xfered,
2118 offset + xfered, len - xfered,
2119 &xfered_len);
2120
2121 if (status == TARGET_XFER_EOF)
2122 return xfered;
2123 if (TARGET_XFER_STATUS_ERROR_P (status))
2124 return -1;
2125
2126 gdb_assert (status == TARGET_XFER_OK);
2127 if (progress)
2128 (*progress) (xfered_len, baton);
2129
2130 xfered += xfered_len;
2131 QUIT;
2132 }
2133 return len;
2134 }
2135
2136 /* For docs on target_write see target.h. */
2137
2138 LONGEST
2139 target_write (struct target_ops *ops,
2140 enum target_object object,
2141 const char *annex, const gdb_byte *buf,
2142 ULONGEST offset, LONGEST len)
2143 {
2144 return target_write_with_progress (ops, object, annex, buf, offset, len,
2145 NULL, NULL);
2146 }
2147
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.

   NOTE: on error (negative return) and on an empty transfer (return
   0), *BUF_P is NOT written; callers must check the return value
   before touching *BUF_P.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Read the next chunk, keeping PADDING bytes of headroom at the
	 end of the buffer (e.g. for a caller-added NUL terminator).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2210
2211 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2212 the size of the transferred data. See the declaration in "target.h"
2213 function for more information about the return value. */
2214
2215 LONGEST
2216 target_read_alloc (struct target_ops *ops, enum target_object object,
2217 const char *annex, gdb_byte **buf_p)
2218 {
2219 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2220 }
2221
2222 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2223 returned as a string, allocated using xmalloc. If an error occurs
2224 or the transfer is unsupported, NULL is returned. Empty objects
2225 are returned as allocated but empty strings. A warning is issued
2226 if the result contains any embedded NUL bytes. */
2227
2228 char *
2229 target_read_stralloc (struct target_ops *ops, enum target_object object,
2230 const char *annex)
2231 {
2232 gdb_byte *buffer;
2233 char *bufstr;
2234 LONGEST i, transferred;
2235
2236 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2237 bufstr = (char *) buffer;
2238
2239 if (transferred < 0)
2240 return NULL;
2241
2242 if (transferred == 0)
2243 return xstrdup ("");
2244
2245 bufstr[transferred] = 0;
2246
2247 /* Check for embedded NUL bytes; but allow trailing NULs. */
2248 for (i = strlen (bufstr); i < transferred; i++)
2249 if (bufstr[i] != 0)
2250 {
2251 warning (_("target object %d, annex %s, "
2252 "contained unexpected null characters"),
2253 (int) object, annex ? annex : "(none)");
2254 break;
2255 }
2256
2257 return bufstr;
2258 }
2259
2260 /* Memory transfer methods. */
2261
2262 void
2263 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2264 LONGEST len)
2265 {
2266 /* This method is used to read from an alternate, non-current
2267 target. This read must bypass the overlay support (as symbols
2268 don't match this target), and GDB's internal cache (wrong cache
2269 for this target). */
2270 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2271 != len)
2272 memory_error (TARGET_XFER_E_IO, addr);
2273 }
2274
2275 ULONGEST
2276 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2277 int len, enum bfd_endian byte_order)
2278 {
2279 gdb_byte buf[sizeof (ULONGEST)];
2280
2281 gdb_assert (len <= sizeof (buf));
2282 get_target_memory (ops, addr, buf, len);
2283 return extract_unsigned_integer (buf, len, byte_order);
2284 }
2285
2286 /* See target.h. */
2287
2288 int
2289 target_insert_breakpoint (struct gdbarch *gdbarch,
2290 struct bp_target_info *bp_tgt)
2291 {
2292 if (!may_insert_breakpoints)
2293 {
2294 warning (_("May not insert breakpoints"));
2295 return 1;
2296 }
2297
2298 return current_target.to_insert_breakpoint (&current_target,
2299 gdbarch, bp_tgt);
2300 }
2301
2302 /* See target.h. */
2303
2304 int
2305 target_remove_breakpoint (struct gdbarch *gdbarch,
2306 struct bp_target_info *bp_tgt)
2307 {
2308 /* This is kind of a weird case to handle, but the permission might
2309 have been changed after breakpoints were inserted - in which case
2310 we should just take the user literally and assume that any
2311 breakpoints should be left in place. */
2312 if (!may_insert_breakpoints)
2313 {
2314 warning (_("May not remove breakpoints"));
2315 return 1;
2316 }
2317
2318 return current_target.to_remove_breakpoint (&current_target,
2319 gdbarch, bp_tgt);
2320 }
2321
2322 static void
2323 target_info (char *args, int from_tty)
2324 {
2325 struct target_ops *t;
2326 int has_all_mem = 0;
2327
2328 if (symfile_objfile != NULL)
2329 printf_unfiltered (_("Symbols from \"%s\".\n"),
2330 objfile_name (symfile_objfile));
2331
2332 for (t = target_stack; t != NULL; t = t->beneath)
2333 {
2334 if (!(*t->to_has_memory) (t))
2335 continue;
2336
2337 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2338 continue;
2339 if (has_all_mem)
2340 printf_unfiltered (_("\tWhile running this, "
2341 "GDB does not access memory from...\n"));
2342 printf_unfiltered ("%s:\n", t->to_longname);
2343 (t->to_files_info) (t);
2344 has_all_mem = (*t->to_has_all_memory) (t);
2345 }
2346 }
2347
2348 /* This function is called before any new inferior is created, e.g.
2349 by running a program, attaching, or connecting to a target.
2350 It cleans up any state from previous invocations which might
2351 change between runs. This is a subset of what target_preopen
2352 resets (things which might change between targets). */
2353
2354 void
2355 target_pre_inferior (int from_tty)
2356 {
2357 /* Clear out solib state. Otherwise the solib state of the previous
2358 inferior might have survived and is entirely wrong for the new
2359 target. This has been observed on GNU/Linux using glibc 2.3. How
2360 to reproduce:
2361
2362 bash$ ./foo&
2363 [1] 4711
2364 bash$ ./foo&
2365 [1] 4712
2366 bash$ gdb ./foo
2367 [...]
2368 (gdb) attach 4711
2369 (gdb) detach
2370 (gdb) attach 4712
2371 Cannot access memory at address 0xdeadbeef
2372 */
2373
2374 /* In some OSs, the shared library list is the same/global/shared
2375 across inferiors. If code is shared between processes, so are
2376 memory regions and features. */
2377 if (!gdbarch_has_global_solist (target_gdbarch ()))
2378 {
2379 no_shared_libraries (NULL, from_tty);
2380
2381 invalidate_target_mem_regions ();
2382
2383 target_clear_description ();
2384 }
2385
2386 agent_capability_invalidate ();
2387 }
2388
2389 /* Callback for iterate_over_inferiors. Gets rid of the given
2390 inferior. */
2391
2392 static int
2393 dispose_inferior (struct inferior *inf, void *args)
2394 {
2395 struct thread_info *thread;
2396
2397 thread = any_thread_of_process (inf->pid);
2398 if (thread)
2399 {
2400 switch_to_thread (thread->ptid);
2401
2402 /* Core inferiors actually should be detached, not killed. */
2403 if (target_has_execution)
2404 target_kill ();
2405 else
2406 target_detach (NULL, 0);
2407 }
2408
2409 return 0;
2410 }
2411
2412 /* This is to be called by the open routine before it does
2413 anything. */
2414
2415 void
2416 target_preopen (int from_tty)
2417 {
2418 dont_repeat ();
2419
2420 if (have_inferiors ())
2421 {
2422 if (!from_tty
2423 || !have_live_inferiors ()
2424 || query (_("A program is being debugged already. Kill it? ")))
2425 iterate_over_inferiors (dispose_inferior, NULL);
2426 else
2427 error (_("Program not killed."));
2428 }
2429
2430 /* Calling target_kill may remove the target from the stack. But if
2431 it doesn't (which seems like a win for UDI), remove it now. */
2432 /* Leave the exec target, though. The user may be switching from a
2433 live process to a core of the same program. */
2434 pop_all_targets_above (file_stratum);
2435
2436 target_pre_inferior (from_tty);
2437 }
2438
2439 /* Detach a target after doing deferred register stores. */
2440
2441 void
2442 target_detach (const char *args, int from_tty)
2443 {
2444 struct target_ops* t;
2445
2446 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2447 /* Don't remove global breakpoints here. They're removed on
2448 disconnection from the target. */
2449 ;
2450 else
2451 /* If we're in breakpoints-always-inserted mode, have to remove
2452 them before detaching. */
2453 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2454
2455 prepare_for_detach ();
2456
2457 current_target.to_detach (&current_target, args, from_tty);
2458 if (targetdebug)
2459 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2460 args, from_tty);
2461 }
2462
2463 void
2464 target_disconnect (char *args, int from_tty)
2465 {
2466 struct target_ops *t;
2467
2468 /* If we're in breakpoints-always-inserted mode or if breakpoints
2469 are global across processes, we have to remove them before
2470 disconnecting. */
2471 remove_breakpoints ();
2472
2473 for (t = current_target.beneath; t != NULL; t = t->beneath)
2474 if (t->to_disconnect != NULL)
2475 {
2476 if (targetdebug)
2477 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2478 args, from_tty);
2479 t->to_disconnect (t, args, from_tty);
2480 return;
2481 }
2482
2483 tcomplain ();
2484 }
2485
2486 ptid_t
2487 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2488 {
2489 struct target_ops *t;
2490 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2491 status, options);
2492
2493 if (targetdebug)
2494 {
2495 char *status_string;
2496 char *options_string;
2497
2498 status_string = target_waitstatus_to_string (status);
2499 options_string = target_options_to_string (options);
2500 fprintf_unfiltered (gdb_stdlog,
2501 "target_wait (%d, status, options={%s})"
2502 " = %d, %s\n",
2503 ptid_get_pid (ptid), options_string,
2504 ptid_get_pid (retval), status_string);
2505 xfree (status_string);
2506 xfree (options_string);
2507 }
2508
2509 return retval;
2510 }
2511
2512 char *
2513 target_pid_to_str (ptid_t ptid)
2514 {
2515 return (*current_target.to_pid_to_str) (&current_target, ptid);
2516 }
2517
2518 char *
2519 target_thread_name (struct thread_info *info)
2520 {
2521 return current_target.to_thread_name (&current_target, info);
2522 }
2523
2524 void
2525 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2526 {
2527 struct target_ops *t;
2528
2529 target_dcache_invalidate ();
2530
2531 current_target.to_resume (&current_target, ptid, step, signal);
2532 if (targetdebug)
2533 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2534 ptid_get_pid (ptid),
2535 step ? "step" : "continue",
2536 gdb_signal_to_name (signal));
2537
2538 registers_changed_ptid (ptid);
2539 set_executing (ptid, 1);
2540 set_running (ptid, 1);
2541 clear_inline_frame_state (ptid);
2542 }
2543
2544 void
2545 target_pass_signals (int numsigs, unsigned char *pass_signals)
2546 {
2547 if (targetdebug)
2548 {
2549 int i;
2550
2551 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2552 numsigs);
2553
2554 for (i = 0; i < numsigs; i++)
2555 if (pass_signals[i])
2556 fprintf_unfiltered (gdb_stdlog, " %s",
2557 gdb_signal_to_name (i));
2558
2559 fprintf_unfiltered (gdb_stdlog, " })\n");
2560 }
2561
2562 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2563 }
2564
2565 void
2566 target_program_signals (int numsigs, unsigned char *program_signals)
2567 {
2568 if (targetdebug)
2569 {
2570 int i;
2571
2572 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2573 numsigs);
2574
2575 for (i = 0; i < numsigs; i++)
2576 if (program_signals[i])
2577 fprintf_unfiltered (gdb_stdlog, " %s",
2578 gdb_signal_to_name (i));
2579
2580 fprintf_unfiltered (gdb_stdlog, " })\n");
2581 }
2582
2583 (*current_target.to_program_signals) (&current_target,
2584 numsigs, program_signals);
2585 }
2586
/* Default to_follow_fork: reaching here means some target reported a
   fork event but no target on the stack knows how to follow it.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2595
2596 /* Look through the list of possible targets for a target that can
2597 follow forks. */
2598
2599 int
2600 target_follow_fork (int follow_child, int detach_fork)
2601 {
2602 int retval = current_target.to_follow_fork (&current_target,
2603 follow_child, detach_fork);
2604
2605 if (targetdebug)
2606 fprintf_unfiltered (gdb_stdlog,
2607 "target_follow_fork (%d, %d) = %d\n",
2608 follow_child, detach_fork, retval);
2609 return retval;
2610 }
2611
/* Default to_mourn_inferior: reaching here means an inferior exited
   but no target on the stack implements mourning.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* The previous message read "to follow mourn inferior", a
     copy/paste from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2618
2619 void
2620 target_mourn_inferior (void)
2621 {
2622 current_target.to_mourn_inferior (&current_target);
2623 if (targetdebug)
2624 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2625
2626 /* We no longer need to keep handles on any of the object files.
2627 Make sure to release them to avoid unnecessarily locking any
2628 of them while we're not actually debugging. */
2629 bfd_cache_close_all ();
2630 }
2631
2632 /* Look for a target which can describe architectural features, starting
2633 from TARGET. If we find one, return its description. */
2634
2635 const struct target_desc *
2636 target_read_description (struct target_ops *target)
2637 {
2638 struct target_ops *t;
2639
2640 for (t = target; t != NULL; t = t->beneath)
2641 if (t->to_read_description != NULL)
2642 {
2643 const struct target_desc *tdesc;
2644
2645 tdesc = t->to_read_description (t);
2646 if (tdesc)
2647 return tdesc;
2648 }
2649
2650 return NULL;
2651 }
2652
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).
   Returns 1 if PATTERN was found (address in *FOUND_ADDRP), 0 if not
   found, -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* One chunk plus PATTERN_LEN - 1 bytes of overlap, so a match
     straddling a chunk boundary is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2760
2761 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2762 sequence of bytes in PATTERN with length PATTERN_LEN.
2763
2764 The result is 1 if found, 0 if not found, and -1 if there was an error
2765 requiring halting of the search (e.g. memory read error).
2766 If the pattern is found the address is recorded in FOUND_ADDRP. */
2767
2768 int
2769 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2770 const gdb_byte *pattern, ULONGEST pattern_len,
2771 CORE_ADDR *found_addrp)
2772 {
2773 struct target_ops *t;
2774 int found;
2775
2776 /* We don't use INHERIT to set current_target.to_search_memory,
2777 so we have to scan the target stack and handle targetdebug
2778 ourselves. */
2779
2780 if (targetdebug)
2781 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2782 hex_string (start_addr));
2783
2784 for (t = current_target.beneath; t != NULL; t = t->beneath)
2785 if (t->to_search_memory != NULL)
2786 break;
2787
2788 if (t != NULL)
2789 {
2790 found = t->to_search_memory (t, start_addr, search_space_len,
2791 pattern, pattern_len, found_addrp);
2792 }
2793 else
2794 {
2795 /* If a special version of to_search_memory isn't available, use the
2796 simple version. */
2797 found = simple_search_memory (current_target.beneath,
2798 start_addr, search_space_len,
2799 pattern, pattern_len, found_addrp);
2800 }
2801
2802 if (targetdebug)
2803 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2804
2805 return found;
2806 }
2807
2808 /* Look through the currently pushed targets. If none of them will
2809 be able to restart the currently running process, issue an error
2810 message. */
2811
2812 void
2813 target_require_runnable (void)
2814 {
2815 struct target_ops *t;
2816
2817 for (t = target_stack; t != NULL; t = t->beneath)
2818 {
2819 /* If this target knows how to create a new program, then
2820 assume we will still be able to after killing the current
2821 one. Either killing and mourning will not pop T, or else
2822 find_default_run_target will find it again. */
2823 if (t->to_create_inferior != NULL)
2824 return;
2825
2826 /* Do not worry about thread_stratum targets that can not
2827 create inferiors. Assume they will be pushed again if
2828 necessary, and continue to the process_stratum. */
2829 if (t->to_stratum == thread_stratum
2830 || t->to_stratum == arch_stratum)
2831 continue;
2832
2833 error (_("The \"%s\" target does not support \"run\". "
2834 "Try \"help target\" or \"continue\"."),
2835 t->to_shortname);
2836 }
2837
2838 /* This function is only called if the target is running. In that
2839 case there should have been a process_stratum target and it
2840 should either know how to create inferiors, or not... */
2841 internal_error (__FILE__, __LINE__, _("No targets found"));
2842 }
2843
2844 /* Look through the list of possible targets for a target that can
2845 execute a run or attach command without any other data. This is
2846 used to locate the default process stratum.
2847
2848 If DO_MESG is not NULL, the result is always valid (error() is
2849 called for errors); else, return NULL on error. */
2850
2851 static struct target_ops *
2852 find_default_run_target (char *do_mesg)
2853 {
2854 struct target_ops **t;
2855 struct target_ops *runable = NULL;
2856 int count;
2857
2858 count = 0;
2859
2860 for (t = target_structs; t < target_structs + target_struct_size;
2861 ++t)
2862 {
2863 if ((*t)->to_can_run && target_can_run (*t))
2864 {
2865 runable = *t;
2866 ++count;
2867 }
2868 }
2869
2870 if (count != 1)
2871 {
2872 if (do_mesg)
2873 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2874 else
2875 return NULL;
2876 }
2877
2878 return runable;
2879 }
2880
2881 void
2882 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2883 {
2884 struct target_ops *t;
2885
2886 t = find_default_run_target ("attach");
2887 (t->to_attach) (t, args, from_tty);
2888 return;
2889 }
2890
2891 void
2892 find_default_create_inferior (struct target_ops *ops,
2893 char *exec_file, char *allargs, char **env,
2894 int from_tty)
2895 {
2896 struct target_ops *t;
2897
2898 t = find_default_run_target ("run");
2899 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2900 return;
2901 }
2902
2903 static int
2904 find_default_can_async_p (struct target_ops *ignore)
2905 {
2906 struct target_ops *t;
2907
2908 /* This may be called before the target is pushed on the stack;
2909 look for the default process stratum. If there's none, gdb isn't
2910 configured with a native debugger, and target remote isn't
2911 connected yet. */
2912 t = find_default_run_target (NULL);
2913 if (t && t->to_can_async_p != delegate_can_async_p)
2914 return (t->to_can_async_p) (t);
2915 return 0;
2916 }
2917
2918 static int
2919 find_default_is_async_p (struct target_ops *ignore)
2920 {
2921 struct target_ops *t;
2922
2923 /* This may be called before the target is pushed on the stack;
2924 look for the default process stratum. If there's none, gdb isn't
2925 configured with a native debugger, and target remote isn't
2926 connected yet. */
2927 t = find_default_run_target (NULL);
2928 if (t && t->to_is_async_p != delegate_is_async_p)
2929 return (t->to_is_async_p) (t);
2930 return 0;
2931 }
2932
2933 static int
2934 find_default_supports_non_stop (struct target_ops *self)
2935 {
2936 struct target_ops *t;
2937
2938 t = find_default_run_target (NULL);
2939 if (t && t->to_supports_non_stop)
2940 return (t->to_supports_non_stop) (t);
2941 return 0;
2942 }
2943
2944 int
2945 target_supports_non_stop (void)
2946 {
2947 struct target_ops *t;
2948
2949 for (t = &current_target; t != NULL; t = t->beneath)
2950 if (t->to_supports_non_stop)
2951 return t->to_supports_non_stop (t);
2952
2953 return 0;
2954 }
2955
2956 /* Implement the "info proc" command. */
2957
2958 int
2959 target_info_proc (char *args, enum info_proc_what what)
2960 {
2961 struct target_ops *t;
2962
2963 /* If we're already connected to something that can get us OS
2964 related data, use it. Otherwise, try using the native
2965 target. */
2966 if (current_target.to_stratum >= process_stratum)
2967 t = current_target.beneath;
2968 else
2969 t = find_default_run_target (NULL);
2970
2971 for (; t != NULL; t = t->beneath)
2972 {
2973 if (t->to_info_proc != NULL)
2974 {
2975 t->to_info_proc (t, args, what);
2976
2977 if (targetdebug)
2978 fprintf_unfiltered (gdb_stdlog,
2979 "target_info_proc (\"%s\", %d)\n", args, what);
2980
2981 return 1;
2982 }
2983 }
2984
2985 return 0;
2986 }
2987
2988 static int
2989 find_default_supports_disable_randomization (struct target_ops *self)
2990 {
2991 struct target_ops *t;
2992
2993 t = find_default_run_target (NULL);
2994 if (t && t->to_supports_disable_randomization)
2995 return (t->to_supports_disable_randomization) (t);
2996 return 0;
2997 }
2998
2999 int
3000 target_supports_disable_randomization (void)
3001 {
3002 struct target_ops *t;
3003
3004 for (t = &current_target; t != NULL; t = t->beneath)
3005 if (t->to_supports_disable_randomization)
3006 return t->to_supports_disable_randomization (t);
3007
3008 return 0;
3009 }
3010
3011 char *
3012 target_get_osdata (const char *type)
3013 {
3014 struct target_ops *t;
3015
3016 /* If we're already connected to something that can get us OS
3017 related data, use it. Otherwise, try using the native
3018 target. */
3019 if (current_target.to_stratum >= process_stratum)
3020 t = current_target.beneath;
3021 else
3022 t = find_default_run_target ("get OS data");
3023
3024 if (!t)
3025 return NULL;
3026
3027 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3028 }
3029
3030 /* Determine the current address space of thread PTID. */
3031
3032 struct address_space *
3033 target_thread_address_space (ptid_t ptid)
3034 {
3035 struct address_space *aspace;
3036 struct inferior *inf;
3037 struct target_ops *t;
3038
3039 for (t = current_target.beneath; t != NULL; t = t->beneath)
3040 {
3041 if (t->to_thread_address_space != NULL)
3042 {
3043 aspace = t->to_thread_address_space (t, ptid);
3044 gdb_assert (aspace);
3045
3046 if (targetdebug)
3047 fprintf_unfiltered (gdb_stdlog,
3048 "target_thread_address_space (%s) = %d\n",
3049 target_pid_to_str (ptid),
3050 address_space_num (aspace));
3051 return aspace;
3052 }
3053 }
3054
3055 /* Fall-back to the "main" address space of the inferior. */
3056 inf = find_inferior_pid (ptid_get_pid (ptid));
3057
3058 if (inf == NULL || inf->aspace == NULL)
3059 internal_error (__FILE__, __LINE__,
3060 _("Can't determine the current "
3061 "address space of thread %s\n"),
3062 target_pid_to_str (ptid));
3063
3064 return inf->aspace;
3065 }
3066
3067
3068 /* Target file operations. */
3069
3070 static struct target_ops *
3071 default_fileio_target (void)
3072 {
3073 /* If we're already connected to something that can perform
3074 file I/O, use it. Otherwise, try using the native target. */
3075 if (current_target.to_stratum >= process_stratum)
3076 return current_target.beneath;
3077 else
3078 return find_default_run_target ("file I/O");
3079 }
3080
3081 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3082 target file descriptor, or -1 if an error occurs (and set
3083 *TARGET_ERRNO). */
3084 int
3085 target_fileio_open (const char *filename, int flags, int mode,
3086 int *target_errno)
3087 {
3088 struct target_ops *t;
3089
3090 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3091 {
3092 if (t->to_fileio_open != NULL)
3093 {
3094 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3095
3096 if (targetdebug)
3097 fprintf_unfiltered (gdb_stdlog,
3098 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3099 filename, flags, mode,
3100 fd, fd != -1 ? 0 : *target_errno);
3101 return fd;
3102 }
3103 }
3104
3105 *target_errno = FILEIO_ENOSYS;
3106 return -1;
3107 }
3108
3109 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3110 Return the number of bytes written, or -1 if an error occurs
3111 (and set *TARGET_ERRNO). */
3112 int
3113 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3114 ULONGEST offset, int *target_errno)
3115 {
3116 struct target_ops *t;
3117
3118 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3119 {
3120 if (t->to_fileio_pwrite != NULL)
3121 {
3122 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3123 target_errno);
3124
3125 if (targetdebug)
3126 fprintf_unfiltered (gdb_stdlog,
3127 "target_fileio_pwrite (%d,...,%d,%s) "
3128 "= %d (%d)\n",
3129 fd, len, pulongest (offset),
3130 ret, ret != -1 ? 0 : *target_errno);
3131 return ret;
3132 }
3133 }
3134
3135 *target_errno = FILEIO_ENOSYS;
3136 return -1;
3137 }
3138
3139 /* Read up to LEN bytes FD on the target into READ_BUF.
3140 Return the number of bytes read, or -1 if an error occurs
3141 (and set *TARGET_ERRNO). */
3142 int
3143 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3144 ULONGEST offset, int *target_errno)
3145 {
3146 struct target_ops *t;
3147
3148 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3149 {
3150 if (t->to_fileio_pread != NULL)
3151 {
3152 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3153 target_errno);
3154
3155 if (targetdebug)
3156 fprintf_unfiltered (gdb_stdlog,
3157 "target_fileio_pread (%d,...,%d,%s) "
3158 "= %d (%d)\n",
3159 fd, len, pulongest (offset),
3160 ret, ret != -1 ? 0 : *target_errno);
3161 return ret;
3162 }
3163 }
3164
3165 *target_errno = FILEIO_ENOSYS;
3166 return -1;
3167 }
3168
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing close.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack supports target file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3193
3194 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3195 occurs (and set *TARGET_ERRNO). */
3196 int
3197 target_fileio_unlink (const char *filename, int *target_errno)
3198 {
3199 struct target_ops *t;
3200
3201 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3202 {
3203 if (t->to_fileio_unlink != NULL)
3204 {
3205 int ret = t->to_fileio_unlink (t, filename, target_errno);
3206
3207 if (targetdebug)
3208 fprintf_unfiltered (gdb_stdlog,
3209 "target_fileio_unlink (%s) = %d (%d)\n",
3210 filename, ret, ret != -1 ? 0 : *target_errno);
3211 return ret;
3212 }
3213 }
3214
3215 *target_errno = FILEIO_ENOSYS;
3216 return -1;
3217 }
3218
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  The caller owns the returned
   string and must xfree it.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing readlink.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack supports readlink.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3245
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error (the cleanup runs on exception paths
   where there is nothing useful to do with a close failure).  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3254
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  On success with a non-empty file, ownership of *BUF_P
   passes to the caller (xfree it); on error or empty file, *BUF_P is
   not written.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure FD is closed on every exit path, including an exception
     thrown by QUIT below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Read into the tail of the buffer, leaving PADDING spare bytes
	 at the end for the caller.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Free the buffer if nothing was
	     transferred, since *BUF_P is not written in that case.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3318
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration of this
   function in "target.h" for more information about the return
   value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* No padding is needed when the caller wants raw bytes.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3328
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so the terminating NUL below always
     fits.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER was already freed by the helper when nothing was read.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3365
3366
3367 static int
3368 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3369 CORE_ADDR addr, int len)
3370 {
3371 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3372 }
3373
3374 static int
3375 default_watchpoint_addr_within_range (struct target_ops *target,
3376 CORE_ADDR addr,
3377 CORE_ADDR start, int length)
3378 {
3379 return addr >= start && addr < start + length;
3380 }
3381
/* Default implementation of to_thread_architecture: every thread is
   assumed to run on the main target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3387
/* Stub that always returns 0.  Used by init_dummy_target, which casts
   this to several differently-typed function pointers.  NOTE(review):
   calling through an incompatibly-typed function pointer is undefined
   behavior in ISO C; consider one stub per signature.  */

static int
return_zero (void)
{
  return 0;
}
3393
3394 /*
3395 * Find the next target down the stack from the specified target.
3396 */
3397
3398 struct target_ops *
3399 find_target_beneath (struct target_ops *t)
3400 {
3401 return t->beneath;
3402 }
3403
3404 /* See target.h. */
3405
3406 struct target_ops *
3407 find_target_at (enum strata stratum)
3408 {
3409 struct target_ops *t;
3410
3411 for (t = current_target.beneath; t != NULL; t = t->beneath)
3412 if (t->to_stratum == stratum)
3413 return t;
3414
3415 return NULL;
3416 }
3417
3418 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid up front so that nothing below
     accidentally operates on the dead inferior.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give interested parties (e.g. GUIs) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3453 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so the result is only valid until the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3465
/* Default implementation of to_pid_to_str: fall back to the plain
   "process NNN" rendering.  */

static char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3471
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () does not return.  */
  return 0;
}
3480
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () does not return.  */
  return NULL;
}
3489
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the stack and
   reports "nothing here" for everything.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Creating an inferior from the dummy target searches for a real
     run target to push first.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* NOTE(review): casting return_zero to these differently-typed
     function pointers and calling through them is undefined behavior
     in ISO C, though it works on supported hosts.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default/no-op method.  */
  install_dummy_methods (&dummy_target);
}
3514 \f
/* Debug wrapper: forward to the real target's to_open, then log the
   call to gdb_stdlog.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3522
/* Close TARG, preferring its to_xclose method (which also frees the
   target_ops) over plain to_close.  TARG must not be on the target
   stack.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3536
/* Attach to a process, delegating to the current target's to_attach
   method, and log the call when target debugging is on.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3545
3546 int
3547 target_thread_alive (ptid_t ptid)
3548 {
3549 int retval;
3550
3551 retval = current_target.to_thread_alive (&current_target, ptid);
3552 if (targetdebug)
3553 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3554 ptid_get_pid (ptid), retval);
3555
3556 return retval;
3557 }
3558
/* Ask the target to update GDB's thread list, and log the call when
   target debugging is on.  */

void
target_find_new_threads (void)
{
  current_target.to_find_new_threads (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
}
3566
3567 void
3568 target_stop (ptid_t ptid)
3569 {
3570 if (!may_stop)
3571 {
3572 warning (_("May not interrupt or stop the target, ignoring attempt"));
3573 return;
3574 }
3575
3576 (*current_target.to_stop) (&current_target, ptid);
3577 }
3578
/* Debug wrapper: forward to the real target's to_post_attach, then
   log the call to gdb_stdlog.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3586
/* Concatenate ELEM to LIST, a comma-separated list, and return the
   result.  The LIST incoming argument is released; the caller owns
   the returned string.  */

static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    return reconcat (list, list, ", ", elem, (char *) NULL);
}
3598
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS so the caller can detect leftover unknown bits.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3616
/* Render TARGET_OPTIONS (a bit mask) as a human-readable,
   comma-separated string.  Unknown bits are rendered as
   "unknown???".  The caller owns the returned string.  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append #OPT to RET when OPT is set, clearing it from
     target_options as a side effect.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits left over were not recognized above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3634
/* Log a register access performed by FUNC on REGNO of REGCACHE to
   gdb_stdlog: the register's name (or number), its raw bytes, and,
   when it fits in a LONGEST, its value as address and integer.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* For a single raw register, also dump its contents.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Only registers no wider than a LONGEST can be shown as a
	 single integer value.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3671
/* Fetch register REGNO (or all registers if REGNO == -1) from the
   target into REGCACHE.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_target.to_fetch_registers (&current_target, regcache, regno);
  if (targetdebug)
    debug_print_register ("target_fetch_registers", regcache, regno);
}
3679
3680 void
3681 target_store_registers (struct regcache *regcache, int regno)
3682 {
3683 struct target_ops *t;
3684
3685 if (!may_write_registers)
3686 error (_("Writing to registers is not allowed (regno %d)"), regno);
3687
3688 current_target.to_store_registers (&current_target, regcache, regno);
3689 if (targetdebug)
3690 {
3691 debug_print_register ("target_store_registers", regcache, regno);
3692 }
3693 }
3694
/* Return the CPU core that thread PTID last ran on, as reported by
   the current target, and log the call when target debugging is on.  */

int
target_core_of_thread (ptid_t ptid)
{
  int retval = current_target.to_core_of_thread (&current_target, ptid);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_core_of_thread (%d) = %d\n",
			ptid_get_pid (ptid), retval);
  return retval;
}
3706
/* Ask the target to compare SIZE bytes at MEMADDR against DATA, and
   log the call when target debugging is on.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  int retval = current_target.to_verify_memory (&current_target,
						data, memaddr, size);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_verify_memory (%s, %s) = %d\n",
			paddress (target_gdbarch (), memaddr),
			pulongest (size),
			retval);
  return retval;
}
3721
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to the current target's
   to_insert_mask_watchpoint method and logs the call when target
   debugging is on.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_insert_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3741
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to the current target's
   to_remove_mask_watchpoint method and logs the call when target
   debugging is on.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  int ret;

  ret = current_target.to_remove_mask_watchpoint (&current_target,
						  addr, mask, rw);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			core_addr_to_string (addr),
			core_addr_to_string (mask), rw, ret);

  return ret;
}
3761
/* The documentation for this function is in its prototype declaration
   in target.h.  Simple delegation to the current target.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_target.to_masked_watch_num_registers (&current_target,
						       addr, mask);
}
3771
/* The documentation for this function is in its prototype declaration
   in target.h.  Simple delegation to the current target.  */

int
target_ranged_break_num_registers (void)
{
  return current_target.to_ranged_break_num_registers (&current_target);
}
3780
3781 /* See target.h. */
3782
3783 struct btrace_target_info *
3784 target_enable_btrace (ptid_t ptid)
3785 {
3786 struct target_ops *t;
3787
3788 for (t = current_target.beneath; t != NULL; t = t->beneath)
3789 if (t->to_enable_btrace != NULL)
3790 return t->to_enable_btrace (t, ptid);
3791
3792 tcomplain ();
3793 return NULL;
3794 }
3795
/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method; complain if none does.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
3812
/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method; complain if none does.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
3829
/* See target.h.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method; otherwise complain and report no support.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
3846
/* See target.h.  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording (t);
	return;
      }

  /* This is optional.  */
}
3863
/* See target.h.  */

void
target_info_record (void)
{
  struct target_ops *t;

  /* Delegate to the first target on the stack implementing the
     method; complain if none does.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}
3880
/* See target.h.  Delegate to the current target's to_save_record
   method.  */

void
target_save_record (const char *filename)
{
  current_target.to_save_record (&current_target, filename);
}
3888
3889 /* See target.h. */
3890
3891 int
3892 target_supports_delete_record (void)
3893 {
3894 struct target_ops *t;
3895
3896 for (t = current_target.beneath; t != NULL; t = t->beneath)
3897 if (t->to_delete_record != NULL)
3898 return 1;
3899
3900 return 0;
3901 }
3902
/* See target.h.  Delegate to the current target's to_delete_record
   method.  */

void
target_delete_record (void)
{
  current_target.to_delete_record (&current_target);
}
3910
/* See target.h.  Delegate to the current target's
   to_record_is_replaying method.  */

int
target_record_is_replaying (void)
{
  return current_target.to_record_is_replaying (&current_target);
}
3918
/* See target.h.  Delegate to the current target's
   to_goto_record_begin method.  */

void
target_goto_record_begin (void)
{
  current_target.to_goto_record_begin (&current_target);
}
3926
/* See target.h.  Delegate to the current target's to_goto_record_end
   method.  */

void
target_goto_record_end (void)
{
  current_target.to_goto_record_end (&current_target);
}
3934
/* See target.h.  Delegate to the current target's to_goto_record
   method with INSN.  */

void
target_goto_record (ULONGEST insn)
{
  current_target.to_goto_record (&current_target, insn);
}
3942
/* See target.h.  Delegate to the current target's to_insn_history
   method.  */

void
target_insn_history (int size, int flags)
{
  current_target.to_insn_history (&current_target, size, flags);
}
3950
/* See target.h.  Delegate to the current target's
   to_insn_history_from method.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  current_target.to_insn_history_from (&current_target, from, size, flags);
}
3958
/* See target.h.  Delegate to the current target's
   to_insn_history_range method.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_insn_history_range (&current_target, begin, end, flags);
}
3966
/* See target.h.  Delegate to the current target's to_call_history
   method.  */

void
target_call_history (int size, int flags)
{
  current_target.to_call_history (&current_target, size, flags);
}
3974
/* See target.h.  Delegate to the current target's
   to_call_history_from method.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  current_target.to_call_history_from (&current_target, begin, size, flags);
}
3982
/* See target.h.  Delegate to the current target's
   to_call_history_range method.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  current_target.to_call_history_range (&current_target, begin, end, flags);
}
3990
/* Debug wrapper: forward to the real target's to_prepare_to_store,
   then log the call to gdb_stdlog.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3998
3999 /* See target.h. */
4000
4001 const struct frame_unwind *
4002 target_get_unwinder (void)
4003 {
4004 struct target_ops *t;
4005
4006 for (t = current_target.beneath; t != NULL; t = t->beneath)
4007 if (t->to_get_unwinder != NULL)
4008 return t->to_get_unwinder;
4009
4010 return NULL;
4011 }
4012
/* See target.h.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  /* Return the first target-supplied tail-call unwinder on the stack,
     if any.  Note to_get_tailcall_unwinder is a data member here,
     returned directly.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4026
/* See target.h.  */

CORE_ADDR
forward_target_decr_pc_after_break (struct target_ops *ops,
				    struct gdbarch *gdbarch)
{
  /* Let the first target implementing the method answer; otherwise
     fall back to the architecture's value.  */
  for (; ops != NULL; ops = ops->beneath)
    if (ops->to_decr_pc_after_break != NULL)
      return ops->to_decr_pc_after_break (ops, gdbarch);

  return gdbarch_decr_pc_after_break (gdbarch);
}
4039
/* See target.h.  Convenience wrapper starting the search at the top
   of the current target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4047
/* Debug wrapper around the deprecated_xfer_memory method: perform the
   transfer, then log the call, result, and (for reads/writes that
   moved data) a hex dump of the bytes to gdb_stdlog.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into lines at 16-byte boundaries of the
	     host buffer address.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At debug level 1, abbreviate everything past the
		 first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4088
/* Debug wrapper: forward to the real target's to_files_info, then log
   the call to gdb_stdlog.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4096
/* Debug wrapper: forward to the real target's to_insert_breakpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4111
/* Debug wrapper: forward to the real target's to_remove_breakpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4126
/* Debug wrapper: forward to the real target's
   to_can_use_hw_breakpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4144
4145 static int
4146 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4147 CORE_ADDR addr, int len)
4148 {
4149 CORE_ADDR retval;
4150
4151 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4152 addr, len);
4153
4154 fprintf_unfiltered (gdb_stdlog,
4155 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4156 core_addr_to_string (addr), (unsigned long) len,
4157 core_addr_to_string (retval));
4158 return retval;
4159 }
4160
/* Debug wrapper: forward to the real target's
   to_can_accel_watchpoint_condition, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4179
/* Debug wrapper: forward to the real target's
   to_stopped_by_watchpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4192
/* Debug wrapper: forward to the real target's to_stopped_data_address,
   then log the call and result to gdb_stdlog.
   NOTE(review): *ADDR is printed even when the method returns 0, in
   which case the callee may not have written it — confirm all
   implementations initialize *ADDR.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
4206
4207 static int
4208 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4209 CORE_ADDR addr,
4210 CORE_ADDR start, int length)
4211 {
4212 int retval;
4213
4214 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4215 start, length);
4216
4217 fprintf_filtered (gdb_stdlog,
4218 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4219 core_addr_to_string (addr), core_addr_to_string (start),
4220 length, retval);
4221 return retval;
4222 }
4223
/* Debug wrapper: forward to the real target's to_insert_hw_breakpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_insert_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4240
/* Debug wrapper: forward to the real target's to_remove_hw_breakpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_remove_hw_breakpoint (struct target_ops *self,
			       struct gdbarch *gdbarch,
			       struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_hw_breakpoint (&debug_target,
						 gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4257
/* Debug wrapper: forward to the real target's to_insert_watchpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_insert_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_insert_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4274
/* Debug wrapper: forward to the real target's to_remove_watchpoint,
   then log the call and result to gdb_stdlog.  */

static int
debug_to_remove_watchpoint (struct target_ops *self,
			    CORE_ADDR addr, int len, int type,
			    struct expression *cond)
{
  int retval;

  retval = debug_target.to_remove_watchpoint (&debug_target,
					      addr, len, type, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, type,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4291
/* Debug wrapper: forward to the real target's to_terminal_init, then
   log the call to gdb_stdlog.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4299
/* Debug wrapper: forward to the real target's to_terminal_inferior,
   then log the call to gdb_stdlog.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4307
/* Debug wrapper: forward to the real target's
   to_terminal_ours_for_output, then log the call to gdb_stdlog.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4315
/* Debug wrapper: forward to the real target's to_terminal_ours, then
   log the call to gdb_stdlog.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4323
/* Debug wrapper: forward to the real target's to_terminal_save_ours,
   then log the call to gdb_stdlog.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4331
/* Debug wrapper: forward to the real target's to_terminal_info, then
   log the call to gdb_stdlog.  */

static void
debug_to_terminal_info (struct target_ops *self,
			const char *arg, int from_tty)
{
  debug_target.to_terminal_info (&debug_target, arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4341
/* Debug wrapper: forward to the real target's to_load, then log the
   call to gdb_stdlog.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4349
/* Debug wrapper: forward to the real target's
   to_post_startup_inferior, then log the call to gdb_stdlog.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4358
/* Debug wrapper: forward to the real target's
   to_insert_fork_catchpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4371
/* Debug wrapper: forward to the real target's
   to_remove_fork_catchpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4384
/* Debug wrapper: forward to the real target's
   to_insert_vfork_catchpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4397
/* Debug wrapper: forward to the real target's
   to_remove_vfork_catchpoint, then log the call and result to
   gdb_stdlog.  */

static int
debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
4410
4411 static int
4412 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4413 {
4414 int retval;
4415
4416 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4417
4418 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4419 pid, retval);
4420
4421 return retval;
4422 }
4423
4424 static int
4425 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4426 {
4427 int retval;
4428
4429 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4430
4431 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4432 pid, retval);
4433
4434 return retval;
4435 }
4436
4437 static int
4438 debug_to_has_exited (struct target_ops *self,
4439 int pid, int wait_status, int *exit_status)
4440 {
4441 int has_exited;
4442
4443 has_exited = debug_target.to_has_exited (&debug_target,
4444 pid, wait_status, exit_status);
4445
4446 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4447 pid, wait_status, *exit_status, has_exited);
4448
4449 return has_exited;
4450 }
4451
4452 static int
4453 debug_to_can_run (struct target_ops *self)
4454 {
4455 int retval;
4456
4457 retval = debug_target.to_can_run (&debug_target);
4458
4459 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4460
4461 return retval;
4462 }
4463
4464 static struct gdbarch *
4465 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4466 {
4467 struct gdbarch *retval;
4468
4469 retval = debug_target.to_thread_architecture (ops, ptid);
4470
4471 fprintf_unfiltered (gdb_stdlog,
4472 "target_thread_architecture (%s) = %s [%s]\n",
4473 target_pid_to_str (ptid),
4474 host_address_to_string (retval),
4475 gdbarch_bfd_arch_info (retval)->printable_name);
4476 return retval;
4477 }
4478
/* Debug wrapper for to_stop: forward the stop request to the real
   target, then log which ptid was asked to stop.  */
static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4487
/* Debug wrapper for to_rcmd: forward COMMAND to the real target (its
   output goes to OUTBUF), then log the command text.  */
static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4495
4496 static char *
4497 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4498 {
4499 char *exec_file;
4500
4501 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4502
4503 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4504 pid, exec_file);
4505
4506 return exec_file;
4507 }
4508
/* Install the debug_to_* logging wrappers on the current target.
   First snapshot CURRENT_TARGET into DEBUG_TARGET so each wrapper can
   forward to the real implementation, then overwrite CURRENT_TARGET's
   method slots with the wrappers.  Only the methods that have a
   debug_to_* counterpart in this file are wrapped.  */
static void
setup_target_debug (void)
{
  /* Keep the original vector around; the wrappers call through it.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4556 \f
4557
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4562
/* Default implementation of to_rcmd: targets without a remote-monitor
   interface report an error.  SELF, COMMAND and OUTPUT are unused.  */
static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4568
/* Implement the "monitor" command: pass CMD to the current target's
   remote monitor, sending any reply text to gdb_stdtarg.  */
static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4575
4576 /* Print the name of each layers of our target stack. */
4577
4578 static void
4579 maintenance_print_target_stack (char *cmd, int from_tty)
4580 {
4581 struct target_ops *t;
4582
4583 printf_filtered (_("The current target stack is:\n"));
4584
4585 for (t = target_stack; t != NULL; t = t->beneath)
4586 {
4587 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4588 }
4589 }
4590
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the change is
   rejected by set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4597
/* "set target-async" callback.  Refuse the change while there are
   live inferiors; otherwise copy the user-staged value into the live
   flag.  */
static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staging variable before erroring
	 out, so "show target-async" keeps reporting the value actually
	 in effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4610
/* "show target-async" callback: report whether asynchronous control
   of the inferior is permitted.  */
static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4620
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these staging variables; set_target_permissions and
   set_write_memory_permission copy them into the live may_* flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4629
/* Make the user-set values match the real values again.  Copies the
   live may_* flags back into the staging variables that the set/show
   commands use, e.g. after a rejected "set" (see
   set_target_permissions).  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4642
4643 /* The one function handles (most of) the permission flags in the same
4644 way. */
4645
4646 static void
4647 set_target_permissions (char *args, int from_tty,
4648 struct cmd_list_element *c)
4649 {
4650 if (target_has_execution)
4651 {
4652 update_target_permissions ();
4653 error (_("Cannot change this setting while the inferior is running."));
4654 }
4655
4656 /* Make the real values match the user-changed values. */
4657 may_write_registers = may_write_registers_1;
4658 may_insert_breakpoints = may_insert_breakpoints_1;
4659 may_insert_tracepoints = may_insert_tracepoints_1;
4660 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4661 may_stop = may_stop_1;
4662 update_observer_mode ();
4663 }
4664
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse the change while the
   inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4675
4676
/* Module initialization: install the dummy target as the bottom layer
   of the target stack, then register the target-related info
   commands, the "monitor" command, the "maint print target-stack"
   command, and the set/show knobs for debugging, async mode and the
   may-* permissions.  */
void
initialize_targets (void)
{
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The may-* commands below all stage their value in a *_1 variable
     and apply it via set_target_permissions (or, for memory writes,
     set_write_memory_permission).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.20331 seconds and 4 git commands to generate.