44900ed995d66d3e206673ef2479c24dc5cfb831
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static int return_minus_one (void);
70
71 static void *return_null (void);
72
73 void target_ignore (void);
74
75 static void target_command (char *, int);
76
77 static struct target_ops *find_default_run_target (char *);
78
79 static target_xfer_partial_ftype default_xfer_partial;
80
81 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
82 ptid_t ptid);
83
84 static int dummy_find_memory_regions (struct target_ops *self,
85 find_memory_region_ftype ignore1,
86 void *ignore2);
87
88 static char *dummy_make_corefile_notes (struct target_ops *self,
89 bfd *ignore1, int *ignore2);
90
91 static int find_default_can_async_p (struct target_ops *ignore);
92
93 static int find_default_is_async_p (struct target_ops *ignore);
94
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops *self);
97
98 #include "target-delegates.c"
99
100 static void init_dummy_target (void);
101
102 static struct target_ops debug_target;
103
104 static void debug_to_open (char *, int);
105
106 static void debug_to_prepare_to_store (struct target_ops *self,
107 struct regcache *);
108
109 static void debug_to_files_info (struct target_ops *);
110
111 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
118 int, int, int);
119
120 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
125 struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_remove_watchpoint (struct target_ops *self,
133 CORE_ADDR, int, int,
134 struct expression *);
135
136 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
137
138 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
139 CORE_ADDR, CORE_ADDR, int);
140
141 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
142 CORE_ADDR, int);
143
144 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
145 CORE_ADDR, int, int,
146 struct expression *);
147
148 static void debug_to_terminal_init (struct target_ops *self);
149
150 static void debug_to_terminal_inferior (struct target_ops *self);
151
152 static void debug_to_terminal_ours_for_output (struct target_ops *self);
153
154 static void debug_to_terminal_save_ours (struct target_ops *self);
155
156 static void debug_to_terminal_ours (struct target_ops *self);
157
158 static void debug_to_load (struct target_ops *self, char *, int);
159
160 static int debug_to_can_run (struct target_ops *self);
161
162 static void debug_to_stop (struct target_ops *self, ptid_t);
163
/* Registry of every target that has been linked into GDB: a
   dynamically grown array of vectors, the number of entries in use,
   and the allocated capacity.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
/* Initial capacity of the target_structs array.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  This is the "squashed"
   view of the whole target stack; see update_current_target.  */

struct target_ops current_target;

/* Command list for the "target" prefix command.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Implementation of "show debug target"; reports the current
   targetdebug setting.  VALUE is its pretty-printed form.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
225
226 static void setup_target_debug (void);
227
/* The user just typed 'target' without the name of a target.  */

static void
target_command (char *arg, int from_tty)
{
  /* ARG and FROM_TTY are unused; just remind the user that a target
     name is required.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
236
237 /* Default target_has_* methods for process_stratum targets. */
238
239 int
240 default_child_has_all_memory (struct target_ops *ops)
241 {
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_memory (struct target_ops *ops)
251 {
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_stack (struct target_ops *ops)
261 {
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
268
269 int
270 default_child_has_registers (struct target_ops *ops)
271 {
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
281 {
282 /* If there's no thread selected, then we can't make it run through
283 hoops. */
284 if (ptid_equal (the_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290
291 int
292 target_has_all_memory_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_all_memory (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_memory_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_memory (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_stack_1 (void)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_stack (t))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_registers_1 (void)
329 {
330 struct target_ops *t;
331
332 for (t = current_target.beneath; t != NULL; t = t->beneath)
333 if (t->to_has_registers (t))
334 return 1;
335
336 return 0;
337 }
338
339 int
340 target_has_execution_1 (ptid_t the_ptid)
341 {
342 struct target_ops *t;
343
344 for (t = current_target.beneath; t != NULL; t = t->beneath)
345 if (t->to_has_execution (t, the_ptid))
346 return 1;
347
348 return 0;
349 }
350
/* Convenience wrapper: like target_has_execution_1, for the current
   inferior's ptid.  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
356
357 /* Complete initialization of T. This ensures that various fields in
358 T are set, if needed by the target implementation. */
359
360 void
361 complete_target_initialization (struct target_ops *t)
362 {
363 /* Provide default values for all "must have" methods. */
364 if (t->to_xfer_partial == NULL)
365 t->to_xfer_partial = default_xfer_partial;
366
367 if (t->to_has_all_memory == NULL)
368 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
369
370 if (t->to_has_memory == NULL)
371 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
372
373 if (t->to_has_stack == NULL)
374 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
375
376 if (t->to_has_registers == NULL)
377 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
378
379 if (t->to_has_execution == NULL)
380 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
381
382 install_delegators (t);
383 }
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
389 void
390 add_target_with_completer (struct target_ops *t,
391 completer_ftype *completer)
392 {
393 struct cmd_list_element *c;
394
395 complete_target_initialization (t);
396
397 if (!target_structs)
398 {
399 target_struct_allocsize = DEFAULT_ALLOCSIZE;
400 target_structs = (struct target_ops **) xmalloc
401 (target_struct_allocsize * sizeof (*target_structs));
402 }
403 if (target_struct_size >= target_struct_allocsize)
404 {
405 target_struct_allocsize *= 2;
406 target_structs = (struct target_ops **)
407 xrealloc ((char *) target_structs,
408 target_struct_allocsize * sizeof (*target_structs));
409 }
410 target_structs[target_struct_size++] = t;
411
412 if (targetlist == NULL)
413 add_prefix_cmd ("target", class_run, target_command, _("\
414 Connect to a target machine or process.\n\
415 The first argument is the type or protocol of the target machine.\n\
416 Remaining arguments are interpreted by the target protocol. For more\n\
417 information on the arguments for a particular protocol, type\n\
418 `help target ' followed by the protocol name."),
419 &targetlist, "target ", 0, &cmdlist);
420 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
421 &targetlist);
422 if (completer != NULL)
423 set_cmd_completer (c, completer);
424 }
425
/* Add a possible target architecture to the list, with no command
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
433
434 /* See target.h. */
435
436 void
437 add_deprecated_target_alias (struct target_ops *t, char *alias)
438 {
439 struct cmd_list_element *c;
440 char *alt;
441
442 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
443 see PR cli/15104. */
444 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
445 alt = xstrprintf ("target %s", t->to_shortname);
446 deprecate_cmd (c, alt);
447 }
448
449 /* Stub functions */
450
/* Default method that deliberately does nothing.  */
void
target_ignore (void)
{
}
455
456 void
457 target_kill (void)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 if (t->to_kill != NULL)
463 {
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
466
467 t->to_kill (t);
468 return;
469 }
470
471 noprocess ();
472 }
473
/* Load a program into the inferior, via the current target's to_load
   method.  ARG and FROM_TTY are as for the "load" command.  */
void
target_load (char *arg, int from_tty)
{
  /* The load will change target memory, so flush stale cached data
     first.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
480
481 void
482 target_create_inferior (char *exec_file, char *args,
483 char **env, int from_tty)
484 {
485 struct target_ops *t;
486
487 for (t = current_target.beneath; t != NULL; t = t->beneath)
488 {
489 if (t->to_create_inferior != NULL)
490 {
491 t->to_create_inferior (t, exec_file, args, env, from_tty);
492 if (targetdebug)
493 fprintf_unfiltered (gdb_stdlog,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file, args, from_tty);
496 return;
497 }
498 }
499
500 internal_error (__FILE__, __LINE__,
501 _("could not find a target to create inferior"));
502 }
503
504 void
505 target_terminal_inferior (void)
506 {
507 /* A background resume (``run&'') should leave GDB in control of the
508 terminal. Use target_can_async_p, not target_is_async_p, since at
509 this point the target is not async yet. However, if sync_execution
510 is not set, we know it will become async prior to resume. */
511 if (target_can_async_p () && !sync_execution)
512 return;
513
514 /* If GDB is resuming the inferior in the foreground, install
515 inferior's terminal modes. */
516 (*current_target.to_terminal_inferior) (&current_target);
517 }
518
519 static int
520 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
521 struct target_ops *t)
522 {
523 errno = EIO; /* Can't read/write this location. */
524 return 0; /* No bytes handled. */
525 }
526
/* Report (by throwing an error) that the current target cannot
   perform the requested operation.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
533
/* Throw an error reporting that there is no live process to act on.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info method: there is no saved terminal state
   to report.  ARGS and FROM_TTY are ignored.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
570 /* Go through the target stack from top to bottom, copying over zero
571 entries in current_target, then filling in still empty entries. In
572 effect, we are doing class inheritance through the pushed target
573 vectors.
574
575 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
576 is currently implemented, is that it discards any knowledge of
577 which target an inherited method originally belonged to.
578 Consequently, new target methods should instead explicitly and
579 locally search the target stack for the target that can handle the
580 request. */
581
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to methods that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)	       \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  current_target.to_read_description = NULL;
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
823
824 /* Push a new target type into the stack of the existing target accessors,
825 possibly superseding some of the existing accessors.
826
827 Rather than allow an empty stack, we always have the dummy target at
828 the bottom stratum, so we can call the function vectors without
829 checking them. */
830
831 void
832 push_target (struct target_ops *t)
833 {
834 struct target_ops **cur;
835
836 /* Check magic number. If wrong, it probably means someone changed
837 the struct definition, but not all the places that initialize one. */
838 if (t->to_magic != OPS_MAGIC)
839 {
840 fprintf_unfiltered (gdb_stderr,
841 "Magic number of %s target struct wrong\n",
842 t->to_shortname);
843 internal_error (__FILE__, __LINE__,
844 _("failed internal consistency check"));
845 }
846
847 /* Find the proper stratum to install this target in. */
848 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
849 {
850 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
851 break;
852 }
853
854 /* If there's already targets at this stratum, remove them. */
855 /* FIXME: cagney/2003-10-15: I think this should be popping all
856 targets to CUR, and not just those at this stratum level. */
857 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
858 {
859 /* There's already something at this stratum level. Close it,
860 and un-hook it from the stack. */
861 struct target_ops *tmp = (*cur);
862
863 (*cur) = (*cur)->beneath;
864 tmp->beneath = NULL;
865 target_close (tmp);
866 }
867
868 /* We have removed all targets in our stratum, now add the new one. */
869 t->beneath = (*cur);
870 (*cur) = t;
871
872 update_current_target ();
873 }
874
875 /* Remove a target_ops vector from the stack, wherever it may be.
876 Return how many times it was removed (0 or 1). */
877
878 int
879 unpush_target (struct target_ops *t)
880 {
881 struct target_ops **cur;
882 struct target_ops *tmp;
883
884 if (t->to_stratum == dummy_stratum)
885 internal_error (__FILE__, __LINE__,
886 _("Attempt to unpush the dummy target"));
887
888 /* Look for the specified target. Note that we assume that a target
889 can only occur once in the target stack. */
890
891 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
892 {
893 if ((*cur) == t)
894 break;
895 }
896
897 /* If we don't find target_ops, quit. Only open targets should be
898 closed. */
899 if ((*cur) == NULL)
900 return 0;
901
902 /* Unchain the target. */
903 tmp = (*cur);
904 (*cur) = (*cur)->beneath;
905 tmp->beneath = NULL;
906
907 update_current_target ();
908
909 /* Finally close the target. Note we do this after unchaining, so
910 any target method calls from within the target_close
911 implementation don't end up in T anymore. */
912 target_close (t);
913
914 return 1;
915 }
916
917 void
918 pop_all_targets_above (enum strata above_stratum)
919 {
920 while ((int) (current_target.to_stratum) > (int) above_stratum)
921 {
922 if (!unpush_target (target_stack))
923 {
924 fprintf_unfiltered (gdb_stderr,
925 "pop_all_targets couldn't find target %s\n",
926 target_stack->to_shortname);
927 internal_error (__FILE__, __LINE__,
928 _("failed internal consistency check"));
929 break;
930 }
931 }
932 }
933
/* Pop every target except the always-present dummy target.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
939
940 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
941
942 int
943 target_is_pushed (struct target_ops *t)
944 {
945 struct target_ops **cur;
946
947 /* Check magic number. If wrong, it probably means someone changed
948 the struct definition, but not all the places that initialize one. */
949 if (t->to_magic != OPS_MAGIC)
950 {
951 fprintf_unfiltered (gdb_stderr,
952 "Magic number of %s target struct wrong\n",
953 t->to_shortname);
954 internal_error (__FILE__, __LINE__,
955 _("failed internal consistency check"));
956 }
957
958 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
959 if (*cur == t)
960 return 1;
961
962 return 0;
963 }
964
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws a
   (possibly TLS-specific) error if the address cannot be found.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can translate TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1061
1062 const char *
1063 target_xfer_status_to_string (enum target_xfer_status err)
1064 {
1065 #define CASE(X) case X: return #X
1066 switch (err)
1067 {
1068 CASE(TARGET_XFER_E_IO);
1069 CASE(TARGET_XFER_E_UNAVAILABLE);
1070 default:
1071 return "<unknown>";
1072 }
1073 #undef CASE
1074 };
1075
1076
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  The returned count includes the terminating NUL if one was
   found; *STRING is set (and must be freed) even on error.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;			/* Growable result buffer.  */
  int buffer_allocated;		/* Current capacity of BUFFER.  */
  char *bufptr;			/* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read in 4-byte aligned chunks; TLEN is the number of bytes
	 of this chunk that belong to the request, OFFSET is where
	 they start within the aligned 4-byte read.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this chunk won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes into the result, stopping at the first NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Hand the buffer to the caller even on error; partial data may
     still be useful, and the caller frees it either way.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1152
1153 struct target_section_table *
1154 target_get_section_table (struct target_ops *target)
1155 {
1156 struct target_ops *t;
1157
1158 if (targetdebug)
1159 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1160
1161 for (t = target; t != NULL; t = t->beneath)
1162 if (t->to_get_section_table != NULL)
1163 return (*t->to_get_section_table) (t);
1164
1165 return NULL;
1166 }
1167
1168 /* Find a section containing ADDR. */
1169
1170 struct target_section *
1171 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1172 {
1173 struct target_section_table *table = target_get_section_table (target);
1174 struct target_section *secp;
1175
1176 if (table == NULL)
1177 return NULL;
1178
1179 for (secp = table->sections; secp < table->sections_end; secp++)
1180 {
1181 if (addr >= secp->addr && addr < secp->endaddr)
1182 return secp;
1183 }
1184 return NULL;
1185 }
1186
1187 /* Read memory from the live target, even if currently inspecting a
1188 traceframe. The return is the same as that of target_read. */
1189
1190 static enum target_xfer_status
1191 target_read_live_memory (enum target_object object,
1192 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1193 ULONGEST *xfered_len)
1194 {
1195 enum target_xfer_status ret;
1196 struct cleanup *cleanup;
1197
1198 /* Switch momentarily out of tfind mode so to access live memory.
1199 Note that this must not clear global state, such as the frame
1200 cache, which must still remain valid for the previous traceframe.
1201 We may be _building_ the frame cache at this point. */
1202 cleanup = make_cleanup_restore_traceframe_number ();
1203 set_traceframe_number (-1);
1204
1205 ret = target_xfer_partial (current_target.beneath, object, NULL,
1206 myaddr, NULL, memaddr, len, xfered_len);
1207
1208 do_cleanups (cleanup);
1209 return ret;
1210 }
1211
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only sections that BFD flags read-only qualify; anything else
     falls through to the EOF return below.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Clip the transfer to the section that contains MEMADDR; if
	 the request crosses the section end, only the leading part is
	 read (the caller retries for the rest).  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  return TARGET_XFER_EOF;
}
1267
1268 /* Read memory from more than one valid target. A core file, for
1269 instance, could have some of memory but delegate other bits to
1270 the target below it. So, we must manually try all targets. */
1271
1272 static enum target_xfer_status
1273 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1274 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1275 ULONGEST *xfered_len)
1276 {
1277 enum target_xfer_status res;
1278
1279 do
1280 {
1281 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1282 readbuf, writebuf, memaddr, len,
1283 xfered_len);
1284 if (res == TARGET_XFER_OK)
1285 break;
1286
1287 /* Stop if the target reports that the memory is not available. */
1288 if (res == TARGET_XFER_E_UNAVAILABLE)
1289 break;
1290
1291 /* We want to continue past core files to executables, but not
1292 past a running target's memory. */
1293 if (ops->to_has_all_memory (ops))
1294 break;
1295
1296 ops = ops->beneath;
1297 }
1298 while (ops != NULL);
1299
1300 return res;
1301 }
1302
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   sections, traceframe-aware live read-only fallback, the dcache, and
   finally a raw transfer through the target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;			/* LEN clipped to the memory region's bound.  */
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip to just before the first available range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1513
1514 /* Perform a partial memory transfer. For docs see target.h,
1515 to_xfer_partial. */
1516
1517 static enum target_xfer_status
1518 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1519 gdb_byte *readbuf, const gdb_byte *writebuf,
1520 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1521 {
1522 enum target_xfer_status res;
1523
1524 /* Zero length requests are ok and require no work. */
1525 if (len == 0)
1526 return TARGET_XFER_EOF;
1527
1528 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1529 breakpoint insns, thus hiding out from higher layers whether
1530 there are software breakpoints inserted in the code stream. */
1531 if (readbuf != NULL)
1532 {
1533 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1534 xfered_len);
1535
1536 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1537 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1538 }
1539 else
1540 {
1541 void *buf;
1542 struct cleanup *old_chain;
1543
1544 /* A large write request is likely to be partially satisfied
1545 by memory_xfer_partial_1. We will continually malloc
1546 and free a copy of the entire write request for breakpoint
1547 shadow handling even though we only end up writing a small
1548 subset of it. Cap writes to 4KB to mitigate this. */
1549 len = min (4096, len);
1550
1551 buf = xmalloc (len);
1552 old_chain = make_cleanup (xfree, buf);
1553 memcpy (buf, writebuf, len);
1554
1555 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1556 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1557 xfered_len);
1558
1559 do_cleanups (old_chain);
1560 }
1561
1562 return res;
1563 }
1564
1565 static void
1566 restore_show_memory_breakpoints (void *arg)
1567 {
1568 show_memory_breakpoints = (uintptr_t) arg;
1569 }
1570
1571 struct cleanup *
1572 make_show_memory_breakpoints_cleanup (int show)
1573 {
1574 int current = show_memory_breakpoints;
1575
1576 show_memory_breakpoints = show;
1577 return make_cleanup (restore_show_memory_breakpoints,
1578 (void *) (uintptr_t) current);
1579 }
1580
/* For docs see target.h, to_xfer_partial.

   Central dispatch: memory objects go through the memory-specific
   paths; everything else goes straight to OPS's method.  Also emits
   the "set debug target" trace output.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, 16 per line; unless
	     "set debug target 2" or higher, stop after one line.
	     NOTE(review): I is int while *XFERED_LEN is ULONGEST --
	     fine for the short debug dumps this produces, but worth
	     confirming for very large transfers.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1673
1674 /* Read LEN bytes of target memory at address MEMADDR, placing the
1675 results in GDB's memory at MYADDR. Returns either 0 for success or
1676 TARGET_XFER_E_IO if any error occurs.
1677
1678 If an error occurs, no guarantee is made about the contents of the data at
1679 MYADDR. In particular, the caller should not depend upon partial reads
1680 filling the buffer with good data. There is no way for the caller to know
1681 how much good data might have been transfered anyway. Callers that can
1682 deal with partial reads should call target_read (which will retry until
1683 it makes no progress, and then return how much was transferred). */
1684
1685 int
1686 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1687 {
1688 /* Dispatch to the topmost target, not the flattened current_target.
1689 Memory accesses check target->to_has_(all_)memory, and the
1690 flattened target doesn't inherit those. */
1691 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1692 myaddr, memaddr, len) == len)
1693 return 0;
1694 else
1695 return TARGET_XFER_E_IO;
1696 }
1697
1698 /* Like target_read_memory, but specify explicitly that this is a read
1699 from the target's raw memory. That is, this read bypasses the
1700 dcache, breakpoint shadowing, etc. */
1701
1702 int
1703 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1704 {
1705 /* See comment in target_read_memory about why the request starts at
1706 current_target.beneath. */
1707 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1708 myaddr, memaddr, len) == len)
1709 return 0;
1710 else
1711 return TARGET_XFER_E_IO;
1712 }
1713
1714 /* Like target_read_memory, but specify explicitly that this is a read from
1715 the target's stack. This may trigger different cache behavior. */
1716
1717 int
1718 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1719 {
1720 /* See comment in target_read_memory about why the request starts at
1721 current_target.beneath. */
1722 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1723 myaddr, memaddr, len) == len)
1724 return 0;
1725 else
1726 return TARGET_XFER_E_IO;
1727 }
1728
1729 /* Like target_read_memory, but specify explicitly that this is a read from
1730 the target's code. This may trigger different cache behavior. */
1731
1732 int
1733 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1734 {
1735 /* See comment in target_read_memory about why the request starts at
1736 current_target.beneath. */
1737 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1738 myaddr, memaddr, len) == len)
1739 return 0;
1740 else
1741 return TARGET_XFER_E_IO;
1742 }
1743
1744 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1745 Returns either 0 for success or TARGET_XFER_E_IO if any
1746 error occurs. If an error occurs, no guarantee is made about how
1747 much data got written. Callers that can deal with partial writes
1748 should call target_write. */
1749
1750 int
1751 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1752 {
1753 /* See comment in target_read_memory about why the request starts at
1754 current_target.beneath. */
1755 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1756 myaddr, memaddr, len) == len)
1757 return 0;
1758 else
1759 return TARGET_XFER_E_IO;
1760 }
1761
1762 /* Write LEN bytes from MYADDR to target raw memory at address
1763 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1764 if any error occurs. If an error occurs, no guarantee is made
1765 about how much data got written. Callers that can deal with
1766 partial writes should call target_write. */
1767
1768 int
1769 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1770 {
1771 /* See comment in target_read_memory about why the request starts at
1772 current_target.beneath. */
1773 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1774 myaddr, memaddr, len) == len)
1775 return 0;
1776 else
1777 return TARGET_XFER_E_IO;
1778 }
1779
/* Fetch the target's memory map.  Returns a sorted vector of
   non-overlapping regions (caller frees), or NULL if no target
   provides a map, the map is empty, or regions overlap.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target on the stack implementing to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by start address so the overlap check below only needs to
     compare neighbors.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1827
1828 void
1829 target_flash_erase (ULONGEST address, LONGEST length)
1830 {
1831 struct target_ops *t;
1832
1833 for (t = current_target.beneath; t != NULL; t = t->beneath)
1834 if (t->to_flash_erase != NULL)
1835 {
1836 if (targetdebug)
1837 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1838 hex_string (address), phex (length, 0));
1839 t->to_flash_erase (t, address, length);
1840 return;
1841 }
1842
1843 tcomplain ();
1844 }
1845
1846 void
1847 target_flash_done (void)
1848 {
1849 struct target_ops *t;
1850
1851 for (t = current_target.beneath; t != NULL; t = t->beneath)
1852 if (t->to_flash_done != NULL)
1853 {
1854 if (targetdebug)
1855 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1856 t->to_flash_done (t);
1857 return;
1858 }
1859
1860 tcomplain ();
1861 }
1862
/* Implement the "show trust-readonly-sections" command output;
   VALUE is the current setting's printable form.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1871
1872 /* More generic transfers. */
1873
1874 static enum target_xfer_status
1875 default_xfer_partial (struct target_ops *ops, enum target_object object,
1876 const char *annex, gdb_byte *readbuf,
1877 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1878 ULONGEST *xfered_len)
1879 {
1880 if (object == TARGET_OBJECT_MEMORY
1881 && ops->deprecated_xfer_memory != NULL)
1882 /* If available, fall back to the target's
1883 "deprecated_xfer_memory" method. */
1884 {
1885 int xfered = -1;
1886
1887 errno = 0;
1888 if (writebuf != NULL)
1889 {
1890 void *buffer = xmalloc (len);
1891 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1892
1893 memcpy (buffer, writebuf, len);
1894 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1895 1/*write*/, NULL, ops);
1896 do_cleanups (cleanup);
1897 }
1898 if (readbuf != NULL)
1899 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1900 0/*read*/, NULL, ops);
1901 if (xfered > 0)
1902 {
1903 *xfered_len = (ULONGEST) xfered;
1904 return TARGET_XFER_E_IO;
1905 }
1906 else if (xfered == 0 && errno == 0)
1907 /* "deprecated_xfer_memory" uses 0, cross checked against
1908 ERRNO as one indication of an error. */
1909 return TARGET_XFER_EOF;
1910 else
1911 return TARGET_XFER_E_IO;
1912 }
1913 else
1914 {
1915 gdb_assert (ops->beneath != NULL);
1916 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1917 readbuf, writebuf, offset, len,
1918 xfered_len);
1919 }
1920 }
1921
1922 /* Target vector read/write partial wrapper functions. */
1923
1924 static enum target_xfer_status
1925 target_read_partial (struct target_ops *ops,
1926 enum target_object object,
1927 const char *annex, gdb_byte *buf,
1928 ULONGEST offset, ULONGEST len,
1929 ULONGEST *xfered_len)
1930 {
1931 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1932 xfered_len);
1933 }
1934
1935 static enum target_xfer_status
1936 target_write_partial (struct target_ops *ops,
1937 enum target_object object,
1938 const char *annex, const gdb_byte *buf,
1939 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1940 {
1941 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1942 xfered_len);
1943 }
1944
1945 /* Wrappers to perform the full transfer. */
1946
1947 /* For docs on target_read see target.h. */
1948
1949 LONGEST
1950 target_read (struct target_ops *ops,
1951 enum target_object object,
1952 const char *annex, gdb_byte *buf,
1953 ULONGEST offset, LONGEST len)
1954 {
1955 LONGEST xfered = 0;
1956
1957 while (xfered < len)
1958 {
1959 ULONGEST xfered_len;
1960 enum target_xfer_status status;
1961
1962 status = target_read_partial (ops, object, annex,
1963 (gdb_byte *) buf + xfered,
1964 offset + xfered, len - xfered,
1965 &xfered_len);
1966
1967 /* Call an observer, notifying them of the xfer progress? */
1968 if (status == TARGET_XFER_EOF)
1969 return xfered;
1970 else if (status == TARGET_XFER_OK)
1971 {
1972 xfered += xfered_len;
1973 QUIT;
1974 }
1975 else
1976 return -1;
1977
1978 }
1979 return len;
1980 }
1981
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and it seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;			/* Nonzero: readable region starts at BEGIN.  */
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.

     This is a binary search for the exact boundary between readable
     and unreadable memory.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the known-readable byte;
	 it is the one we attempt to read.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into a
	 right-sized buffer; BUF itself is released.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2116
2117 void
2118 free_memory_read_result_vector (void *x)
2119 {
2120 VEC(memory_read_result_s) *v = x;
2121 memory_read_result_s *current;
2122 int ix;
2123
2124 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2125 {
2126 xfree (current->data);
2127 }
2128 VEC_free (memory_read_result_s, v);
2129 }
2130
VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  /* Read [OFFSET, OFFSET + LEN) region by region, collecting every
     readable chunk into the returned vector; unreadable spans simply
     produce no entry.  The caller owns the vector and each chunk's
     data (see free_memory_read_result_vector).  */
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	/* An unbounded region covers the rest of the request.  */
	rlen = len - xfered;
      else
	/* NOTE(review): the region was looked up at OFFSET + XFERED, so
	   the remaining length within it would seem to be
	   region->hi - (offset + xfered); confirm whether subtracting
	   plain OFFSET here is intentional.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; record the chunk (BUFFER's
		 ownership moves into the vector).  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2189
2190
2191 /* An alternative to target_write with progress callbacks. */
2192
2193 LONGEST
2194 target_write_with_progress (struct target_ops *ops,
2195 enum target_object object,
2196 const char *annex, const gdb_byte *buf,
2197 ULONGEST offset, LONGEST len,
2198 void (*progress) (ULONGEST, void *), void *baton)
2199 {
2200 LONGEST xfered = 0;
2201
2202 /* Give the progress callback a chance to set up. */
2203 if (progress)
2204 (*progress) (0, baton);
2205
2206 while (xfered < len)
2207 {
2208 ULONGEST xfered_len;
2209 enum target_xfer_status status;
2210
2211 status = target_write_partial (ops, object, annex,
2212 (gdb_byte *) buf + xfered,
2213 offset + xfered, len - xfered,
2214 &xfered_len);
2215
2216 if (status == TARGET_XFER_EOF)
2217 return xfered;
2218 if (TARGET_XFER_STATUS_ERROR_P (status))
2219 return -1;
2220
2221 gdb_assert (status == TARGET_XFER_OK);
2222 if (progress)
2223 (*progress) (xfered_len, baton);
2224
2225 xfered += xfered_len;
2226 QUIT;
2227 }
2228 return len;
2229 }
2230
/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Plain write: delegate to the progress-aware variant with no
     callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2242
2243 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2244 the size of the transferred data. PADDING additional bytes are
2245 available in *BUF_P. This is a helper function for
2246 target_read_alloc; see the declaration of that function for more
2247 information. */
2248
2249 static LONGEST
2250 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2251 const char *annex, gdb_byte **buf_p, int padding)
2252 {
2253 size_t buf_alloc, buf_pos;
2254 gdb_byte *buf;
2255
2256 /* This function does not have a length parameter; it reads the
2257 entire OBJECT). Also, it doesn't support objects fetched partly
2258 from one target and partly from another (in a different stratum,
2259 e.g. a core file and an executable). Both reasons make it
2260 unsuitable for reading memory. */
2261 gdb_assert (object != TARGET_OBJECT_MEMORY);
2262
2263 /* Start by reading up to 4K at a time. The target will throttle
2264 this number down if necessary. */
2265 buf_alloc = 4096;
2266 buf = xmalloc (buf_alloc);
2267 buf_pos = 0;
2268 while (1)
2269 {
2270 ULONGEST xfered_len;
2271 enum target_xfer_status status;
2272
2273 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2274 buf_pos, buf_alloc - buf_pos - padding,
2275 &xfered_len);
2276
2277 if (status == TARGET_XFER_EOF)
2278 {
2279 /* Read all there was. */
2280 if (buf_pos == 0)
2281 xfree (buf);
2282 else
2283 *buf_p = buf;
2284 return buf_pos;
2285 }
2286 else if (status != TARGET_XFER_OK)
2287 {
2288 /* An error occurred. */
2289 xfree (buf);
2290 return TARGET_XFER_E_IO;
2291 }
2292
2293 buf_pos += xfered_len;
2294
2295 /* If the buffer is filling up, expand it. */
2296 if (buf_alloc < buf_pos * 2)
2297 {
2298 buf_alloc *= 2;
2299 buf = xrealloc (buf, buf_alloc);
2300 }
2301
2302 QUIT;
2303 }
2304 }
2305
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* Raw read: no padding bytes requested.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2316
2317 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2318 returned as a string, allocated using xmalloc. If an error occurs
2319 or the transfer is unsupported, NULL is returned. Empty objects
2320 are returned as allocated but empty strings. A warning is issued
2321 if the result contains any embedded NUL bytes. */
2322
2323 char *
2324 target_read_stralloc (struct target_ops *ops, enum target_object object,
2325 const char *annex)
2326 {
2327 gdb_byte *buffer;
2328 char *bufstr;
2329 LONGEST i, transferred;
2330
2331 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2332 bufstr = (char *) buffer;
2333
2334 if (transferred < 0)
2335 return NULL;
2336
2337 if (transferred == 0)
2338 return xstrdup ("");
2339
2340 bufstr[transferred] = 0;
2341
2342 /* Check for embedded NUL bytes; but allow trailing NULs. */
2343 for (i = strlen (bufstr); i < transferred; i++)
2344 if (bufstr[i] != 0)
2345 {
2346 warning (_("target object %d, annex %s, "
2347 "contained unexpected null characters"),
2348 (int) object, annex ? annex : "(none)");
2349 break;
2350 }
2351
2352 return bufstr;
2353 }
2354
2355 /* Memory transfer methods. */
2356
2357 void
2358 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2359 LONGEST len)
2360 {
2361 /* This method is used to read from an alternate, non-current
2362 target. This read must bypass the overlay support (as symbols
2363 don't match this target), and GDB's internal cache (wrong cache
2364 for this target). */
2365 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2366 != len)
2367 memory_error (TARGET_XFER_E_IO, addr);
2368 }
2369
ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  /* Read LEN bytes at ADDR via get_target_memory (which bypasses the
     overlay machinery and dcache) and decode them as an unsigned
     integer in BYTE_ORDER.  LEN must not exceed sizeof (ULONGEST).  */
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2380
2381 /* See target.h. */
2382
2383 int
2384 target_insert_breakpoint (struct gdbarch *gdbarch,
2385 struct bp_target_info *bp_tgt)
2386 {
2387 if (!may_insert_breakpoints)
2388 {
2389 warning (_("May not insert breakpoints"));
2390 return 1;
2391 }
2392
2393 return current_target.to_insert_breakpoint (&current_target,
2394 gdbarch, bp_tgt);
2395 }
2396
2397 /* See target.h. */
2398
2399 int
2400 target_remove_breakpoint (struct gdbarch *gdbarch,
2401 struct bp_target_info *bp_tgt)
2402 {
2403 /* This is kind of a weird case to handle, but the permission might
2404 have been changed after breakpoints were inserted - in which case
2405 we should just take the user literally and assume that any
2406 breakpoints should be left in place. */
2407 if (!may_insert_breakpoints)
2408 {
2409 warning (_("May not remove breakpoints"));
2410 return 1;
2411 }
2412
2413 return current_target.to_remove_breakpoint (&current_target,
2414 gdbarch, bp_tgt);
2415 }
2416
/* Command callback: print where symbols come from and which targets
   on the stack supply memory.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Report only targets that actually provide memory.  */
      if (!(*t->to_has_memory) (t))
	continue;

      /* Skip the dummy stratum and below.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* A higher target that claimed all memory shadows this one while
	 the program runs.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2442
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Forget cached agent capabilities; they may differ for the new
     inferior.  */
  agent_capability_invalidate ();
}
2483
2484 /* Callback for iterate_over_inferiors. Gets rid of the given
2485 inferior. */
2486
2487 static int
2488 dispose_inferior (struct inferior *inf, void *args)
2489 {
2490 struct thread_info *thread;
2491
2492 thread = any_thread_of_process (inf->pid);
2493 if (thread)
2494 {
2495 switch_to_thread (thread->ptid);
2496
2497 /* Core inferiors actually should be detached, not killed. */
2498 if (target_has_execution)
2499 target_kill ();
2500 else
2501 target_detach (NULL, 0);
2502 }
2503
2504 return 0;
2505 }
2506
2507 /* This is to be called by the open routine before it does
2508 anything. */
2509
2510 void
2511 target_preopen (int from_tty)
2512 {
2513 dont_repeat ();
2514
2515 if (have_inferiors ())
2516 {
2517 if (!from_tty
2518 || !have_live_inferiors ()
2519 || query (_("A program is being debugged already. Kill it? ")))
2520 iterate_over_inferiors (dispose_inferior, NULL);
2521 else
2522 error (_("Program not killed."));
2523 }
2524
2525 /* Calling target_kill may remove the target from the stack. But if
2526 it doesn't (which seems like a win for UDI), remove it now. */
2527 /* Leave the exec target, though. The user may be switching from a
2528 live process to a core of the same program. */
2529 pop_all_targets_above (file_stratum);
2530
2531 target_pre_inferior (from_tty);
2532 }
2533
2534 /* Detach a target after doing deferred register stores. */
2535
2536 void
2537 target_detach (const char *args, int from_tty)
2538 {
2539 struct target_ops* t;
2540
2541 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2542 /* Don't remove global breakpoints here. They're removed on
2543 disconnection from the target. */
2544 ;
2545 else
2546 /* If we're in breakpoints-always-inserted mode, have to remove
2547 them before detaching. */
2548 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2549
2550 prepare_for_detach ();
2551
2552 current_target.to_detach (&current_target, args, from_tty);
2553 if (targetdebug)
2554 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2555 args, from_tty);
2556 }
2557
2558 void
2559 target_disconnect (char *args, int from_tty)
2560 {
2561 struct target_ops *t;
2562
2563 /* If we're in breakpoints-always-inserted mode or if breakpoints
2564 are global across processes, we have to remove them before
2565 disconnecting. */
2566 remove_breakpoints ();
2567
2568 for (t = current_target.beneath; t != NULL; t = t->beneath)
2569 if (t->to_disconnect != NULL)
2570 {
2571 if (targetdebug)
2572 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2573 args, from_tty);
2574 t->to_disconnect (t, args, from_tty);
2575 return;
2576 }
2577
2578 tcomplain ();
2579 }
2580
2581 ptid_t
2582 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2583 {
2584 struct target_ops *t;
2585 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2586 status, options);
2587
2588 if (targetdebug)
2589 {
2590 char *status_string;
2591 char *options_string;
2592
2593 status_string = target_waitstatus_to_string (status);
2594 options_string = target_options_to_string (options);
2595 fprintf_unfiltered (gdb_stdlog,
2596 "target_wait (%d, status, options={%s})"
2597 " = %d, %s\n",
2598 ptid_get_pid (ptid), options_string,
2599 ptid_get_pid (retval), status_string);
2600 xfree (status_string);
2601 xfree (options_string);
2602 }
2603
2604 return retval;
2605 }
2606
2607 char *
2608 target_pid_to_str (ptid_t ptid)
2609 {
2610 struct target_ops *t;
2611
2612 for (t = current_target.beneath; t != NULL; t = t->beneath)
2613 {
2614 if (t->to_pid_to_str != NULL)
2615 return (*t->to_pid_to_str) (t, ptid);
2616 }
2617
2618 return normal_pid_to_str (ptid);
2619 }
2620
char *
target_thread_name (struct thread_info *info)
{
  /* Return the target-reported name of thread INFO; delegated through
     the current target stack.  */
  return current_target.to_thread_name (&current_target, info);
}
2626
2627 void
2628 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2629 {
2630 struct target_ops *t;
2631
2632 target_dcache_invalidate ();
2633
2634 current_target.to_resume (&current_target, ptid, step, signal);
2635 if (targetdebug)
2636 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2637 ptid_get_pid (ptid),
2638 step ? "step" : "continue",
2639 gdb_signal_to_name (signal));
2640
2641 registers_changed_ptid (ptid);
2642 set_executing (ptid, 1);
2643 set_running (ptid, 1);
2644 clear_inline_frame_state (ptid);
2645 }
2646
/* Hand PASS_SIGNALS (an array of NUMSIGS flags indexed by signal
   number -- see target.h for the exact semantics) to the first target
   on the stack implementing to_pass_signals.  Silently a no-op when
   no target does.  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_pass_signals != NULL)
	{
	  if (targetdebug)
	    {
	      int i;

	      /* Log the names of the signals whose flag is set.  */
	      fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
				  numsigs);

	      for (i = 0; i < numsigs; i++)
		if (pass_signals[i])
		  fprintf_unfiltered (gdb_stdlog, " %s",
				      gdb_signal_to_name (i));

	      fprintf_unfiltered (gdb_stdlog, " })\n");
	    }

	  (*t->to_pass_signals) (t, numsigs, pass_signals);
	  return;
	}
    }
}
2676
/* Hand PROGRAM_SIGNALS (an array of NUMSIGS flags indexed by signal
   number -- see target.h for the exact semantics) to the first target
   on the stack implementing to_program_signals.  Silently a no-op
   when no target does.  */

void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_program_signals != NULL)
	{
	  if (targetdebug)
	    {
	      int i;

	      /* Log the names of the signals whose flag is set.  */
	      fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
				  numsigs);

	      for (i = 0; i < numsigs; i++)
		if (program_signals[i])
		  fprintf_unfiltered (gdb_stdlog, " %s",
				      gdb_signal_to_name (i));

	      fprintf_unfiltered (gdb_stdlog, " })\n");
	    }

	  (*t->to_program_signals) (t, numsigs, program_signals);
	  return;
	}
    }
}
2706
2707 /* Look through the list of possible targets for a target that can
2708 follow forks. */
2709
2710 int
2711 target_follow_fork (int follow_child, int detach_fork)
2712 {
2713 struct target_ops *t;
2714
2715 for (t = current_target.beneath; t != NULL; t = t->beneath)
2716 {
2717 if (t->to_follow_fork != NULL)
2718 {
2719 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2720
2721 if (targetdebug)
2722 fprintf_unfiltered (gdb_stdlog,
2723 "target_follow_fork (%d, %d) = %d\n",
2724 follow_child, detach_fork, retval);
2725 return retval;
2726 }
2727 }
2728
2729 /* Some target returned a fork event, but did not know how to follow it. */
2730 internal_error (__FILE__, __LINE__,
2731 _("could not find a target to follow fork"));
2732 }
2733
2734 void
2735 target_mourn_inferior (void)
2736 {
2737 struct target_ops *t;
2738
2739 for (t = current_target.beneath; t != NULL; t = t->beneath)
2740 {
2741 if (t->to_mourn_inferior != NULL)
2742 {
2743 t->to_mourn_inferior (t);
2744 if (targetdebug)
2745 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2746
2747 /* We no longer need to keep handles on any of the object files.
2748 Make sure to release them to avoid unnecessarily locking any
2749 of them while we're not actually debugging. */
2750 bfd_cache_close_all ();
2751
2752 return;
2753 }
2754 }
2755
2756 internal_error (__FILE__, __LINE__,
2757 _("could not find a target to follow mourn inferior"));
2758 }
2759
2760 /* Look for a target which can describe architectural features, starting
2761 from TARGET. If we find one, return its description. */
2762
2763 const struct target_desc *
2764 target_read_description (struct target_ops *target)
2765 {
2766 struct target_ops *t;
2767
2768 for (t = target; t != NULL; t = t->beneath)
2769 if (t->to_read_description != NULL)
2770 {
2771 const struct target_desc *tdesc;
2772
2773 tdesc = t->to_read_description (t);
2774 if (tdesc)
2775 return tdesc;
2776 }
2777
2778 return NULL;
2779 }
2780
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* Over-allocate by PATTERN_LEN - 1 so a match straddling a chunk
     boundary is still found within a single buffer.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2888
2889 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2890 sequence of bytes in PATTERN with length PATTERN_LEN.
2891
2892 The result is 1 if found, 0 if not found, and -1 if there was an error
2893 requiring halting of the search (e.g. memory read error).
2894 If the pattern is found the address is recorded in FOUND_ADDRP. */
2895
2896 int
2897 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2898 const gdb_byte *pattern, ULONGEST pattern_len,
2899 CORE_ADDR *found_addrp)
2900 {
2901 struct target_ops *t;
2902 int found;
2903
2904 /* We don't use INHERIT to set current_target.to_search_memory,
2905 so we have to scan the target stack and handle targetdebug
2906 ourselves. */
2907
2908 if (targetdebug)
2909 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2910 hex_string (start_addr));
2911
2912 for (t = current_target.beneath; t != NULL; t = t->beneath)
2913 if (t->to_search_memory != NULL)
2914 break;
2915
2916 if (t != NULL)
2917 {
2918 found = t->to_search_memory (t, start_addr, search_space_len,
2919 pattern, pattern_len, found_addrp);
2920 }
2921 else
2922 {
2923 /* If a special version of to_search_memory isn't available, use the
2924 simple version. */
2925 found = simple_search_memory (current_target.beneath,
2926 start_addr, search_space_len,
2927 pattern, pattern_len, found_addrp);
2928 }
2929
2930 if (targetdebug)
2931 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2932
2933 return found;
2934 }
2935
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      /* First non-thread/arch target that cannot run: report it.  */
      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2971
2972 /* Look through the list of possible targets for a target that can
2973 execute a run or attach command without any other data. This is
2974 used to locate the default process stratum.
2975
2976 If DO_MESG is not NULL, the result is always valid (error() is
2977 called for errors); else, return NULL on error. */
2978
2979 static struct target_ops *
2980 find_default_run_target (char *do_mesg)
2981 {
2982 struct target_ops **t;
2983 struct target_ops *runable = NULL;
2984 int count;
2985
2986 count = 0;
2987
2988 for (t = target_structs; t < target_structs + target_struct_size;
2989 ++t)
2990 {
2991 if ((*t)->to_can_run && target_can_run (*t))
2992 {
2993 runable = *t;
2994 ++count;
2995 }
2996 }
2997
2998 if (count != 1)
2999 {
3000 if (do_mesg)
3001 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3002 else
3003 return NULL;
3004 }
3005
3006 return runable;
3007 }
3008
3009 void
3010 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3011 {
3012 struct target_ops *t;
3013
3014 t = find_default_run_target ("attach");
3015 (t->to_attach) (t, args, from_tty);
3016 return;
3017 }
3018
3019 void
3020 find_default_create_inferior (struct target_ops *ops,
3021 char *exec_file, char *allargs, char **env,
3022 int from_tty)
3023 {
3024 struct target_ops *t;
3025
3026 t = find_default_run_target ("run");
3027 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3028 return;
3029 }
3030
3031 static int
3032 find_default_can_async_p (struct target_ops *ignore)
3033 {
3034 struct target_ops *t;
3035
3036 /* This may be called before the target is pushed on the stack;
3037 look for the default process stratum. If there's none, gdb isn't
3038 configured with a native debugger, and target remote isn't
3039 connected yet. */
3040 t = find_default_run_target (NULL);
3041 if (t && t->to_can_async_p != delegate_can_async_p)
3042 return (t->to_can_async_p) (t);
3043 return 0;
3044 }
3045
3046 static int
3047 find_default_is_async_p (struct target_ops *ignore)
3048 {
3049 struct target_ops *t;
3050
3051 /* This may be called before the target is pushed on the stack;
3052 look for the default process stratum. If there's none, gdb isn't
3053 configured with a native debugger, and target remote isn't
3054 connected yet. */
3055 t = find_default_run_target (NULL);
3056 if (t && t->to_is_async_p != delegate_is_async_p)
3057 return (t->to_is_async_p) (t);
3058 return 0;
3059 }
3060
3061 static int
3062 find_default_supports_non_stop (struct target_ops *self)
3063 {
3064 struct target_ops *t;
3065
3066 t = find_default_run_target (NULL);
3067 if (t && t->to_supports_non_stop)
3068 return (t->to_supports_non_stop) (t);
3069 return 0;
3070 }
3071
3072 int
3073 target_supports_non_stop (void)
3074 {
3075 struct target_ops *t;
3076
3077 for (t = &current_target; t != NULL; t = t->beneath)
3078 if (t->to_supports_non_stop)
3079 return t->to_supports_non_stop (t);
3080
3081 return 0;
3082 }
3083
3084 /* Implement the "info proc" command. */
3085
3086 int
3087 target_info_proc (char *args, enum info_proc_what what)
3088 {
3089 struct target_ops *t;
3090
3091 /* If we're already connected to something that can get us OS
3092 related data, use it. Otherwise, try using the native
3093 target. */
3094 if (current_target.to_stratum >= process_stratum)
3095 t = current_target.beneath;
3096 else
3097 t = find_default_run_target (NULL);
3098
3099 for (; t != NULL; t = t->beneath)
3100 {
3101 if (t->to_info_proc != NULL)
3102 {
3103 t->to_info_proc (t, args, what);
3104
3105 if (targetdebug)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "target_info_proc (\"%s\", %d)\n", args, what);
3108
3109 return 1;
3110 }
3111 }
3112
3113 return 0;
3114 }
3115
3116 static int
3117 find_default_supports_disable_randomization (struct target_ops *self)
3118 {
3119 struct target_ops *t;
3120
3121 t = find_default_run_target (NULL);
3122 if (t && t->to_supports_disable_randomization)
3123 return (t->to_supports_disable_randomization) (t);
3124 return 0;
3125 }
3126
3127 int
3128 target_supports_disable_randomization (void)
3129 {
3130 struct target_ops *t;
3131
3132 for (t = &current_target; t != NULL; t = t->beneath)
3133 if (t->to_supports_disable_randomization)
3134 return t->to_supports_disable_randomization (t);
3135
3136 return 0;
3137 }
3138
3139 char *
3140 target_get_osdata (const char *type)
3141 {
3142 struct target_ops *t;
3143
3144 /* If we're already connected to something that can get us OS
3145 related data, use it. Otherwise, try using the native
3146 target. */
3147 if (current_target.to_stratum >= process_stratum)
3148 t = current_target.beneath;
3149 else
3150 t = find_default_run_target ("get OS data");
3151
3152 if (!t)
3153 return NULL;
3154
3155 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3156 }
3157
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target on the stack that can map a thread to an
     address space.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3194
3195
3196 /* Target file operations. */
3197
3198 static struct target_ops *
3199 default_fileio_target (void)
3200 {
3201 /* If we're already connected to something that can perform
3202 file I/O, use it. Otherwise, try using the native target. */
3203 if (current_target.to_stratum >= process_stratum)
3204 return current_target.beneath;
3205 else
3206 return find_default_run_target ("file I/O");
3207 }
3208
3209 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3210 target file descriptor, or -1 if an error occurs (and set
3211 *TARGET_ERRNO). */
3212 int
3213 target_fileio_open (const char *filename, int flags, int mode,
3214 int *target_errno)
3215 {
3216 struct target_ops *t;
3217
3218 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3219 {
3220 if (t->to_fileio_open != NULL)
3221 {
3222 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3223
3224 if (targetdebug)
3225 fprintf_unfiltered (gdb_stdlog,
3226 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3227 filename, flags, mode,
3228 fd, fd != -1 ? 0 : *target_errno);
3229 return fd;
3230 }
3231 }
3232
3233 *target_errno = FILEIO_ENOSYS;
3234 return -1;
3235 }
3236
3237 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3238 Return the number of bytes written, or -1 if an error occurs
3239 (and set *TARGET_ERRNO). */
3240 int
3241 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3242 ULONGEST offset, int *target_errno)
3243 {
3244 struct target_ops *t;
3245
3246 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3247 {
3248 if (t->to_fileio_pwrite != NULL)
3249 {
3250 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3251 target_errno);
3252
3253 if (targetdebug)
3254 fprintf_unfiltered (gdb_stdlog,
3255 "target_fileio_pwrite (%d,...,%d,%s) "
3256 "= %d (%d)\n",
3257 fd, len, pulongest (offset),
3258 ret, ret != -1 ? 0 : *target_errno);
3259 return ret;
3260 }
3261 }
3262
3263 *target_errno = FILEIO_ENOSYS;
3264 return -1;
3265 }
3266
3267 /* Read up to LEN bytes FD on the target into READ_BUF.
3268 Return the number of bytes read, or -1 if an error occurs
3269 (and set *TARGET_ERRNO). */
3270 int
3271 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3272 ULONGEST offset, int *target_errno)
3273 {
3274 struct target_ops *t;
3275
3276 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3277 {
3278 if (t->to_fileio_pread != NULL)
3279 {
3280 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3281 target_errno);
3282
3283 if (targetdebug)
3284 fprintf_unfiltered (gdb_stdlog,
3285 "target_fileio_pread (%d,...,%d,%s) "
3286 "= %d (%d)\n",
3287 fd, len, pulongest (offset),
3288 ret, ret != -1 ? 0 : *target_errno);
3289 return ret;
3290 }
3291 }
3292
3293 *target_errno = FILEIO_ENOSYS;
3294 return -1;
3295 }
3296
3297 /* Close FD on the target. Return 0, or -1 if an error occurs
3298 (and set *TARGET_ERRNO). */
3299 int
3300 target_fileio_close (int fd, int *target_errno)
3301 {
3302 struct target_ops *t;
3303
3304 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3305 {
3306 if (t->to_fileio_close != NULL)
3307 {
3308 int ret = t->to_fileio_close (t, fd, target_errno);
3309
3310 if (targetdebug)
3311 fprintf_unfiltered (gdb_stdlog,
3312 "target_fileio_close (%d) = %d (%d)\n",
3313 fd, ret, ret != -1 ? 0 : *target_errno);
3314 return ret;
3315 }
3316 }
3317
3318 *target_errno = FILEIO_ENOSYS;
3319 return -1;
3320 }
3321
3322 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3323 occurs (and set *TARGET_ERRNO). */
3324 int
3325 target_fileio_unlink (const char *filename, int *target_errno)
3326 {
3327 struct target_ops *t;
3328
3329 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3330 {
3331 if (t->to_fileio_unlink != NULL)
3332 {
3333 int ret = t->to_fileio_unlink (t, filename, target_errno);
3334
3335 if (targetdebug)
3336 fprintf_unfiltered (gdb_stdlog,
3337 "target_fileio_unlink (%s) = %d (%d)\n",
3338 filename, ret, ret != -1 ? 0 : *target_errno);
3339 return ret;
3340 }
3341 }
3342
3343 *target_errno = FILEIO_ENOSYS;
3344 return -1;
3345 }
3346
3347 /* Read value of symbolic link FILENAME on the target. Return a
3348 null-terminated string allocated via xmalloc, or NULL if an error
3349 occurs (and set *TARGET_ERRNO). */
3350 char *
3351 target_fileio_readlink (const char *filename, int *target_errno)
3352 {
3353 struct target_ops *t;
3354
3355 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3356 {
3357 if (t->to_fileio_readlink != NULL)
3358 {
3359 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3360
3361 if (targetdebug)
3362 fprintf_unfiltered (gdb_stdlog,
3363 "target_fileio_readlink (%s) = %s (%d)\n",
3364 filename, ret? ret : "(nil)",
3365 ret? 0 : *target_errno);
3366 return ret;
3367 }
3368 }
3369
3370 *target_errno = FILEIO_ENOSYS;
3371 return NULL;
3372 }
3373
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int ignored_errno;

  target_fileio_close (*(int *) opaque, &ignored_errno);
}
3382
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On error, returns -1 and leaves *BUF_P untouched.  When the file is
   empty, returns 0 and also leaves *BUF_P untouched (the scratch
   buffer is freed); callers must not look at *BUF_P in those cases.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path, including a QUIT thrown
     from the loop below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes of headroom at the end of the buffer
	 for the caller (e.g. room for a terminating NUL).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  Doubling once the
	 buffer is more than half full guarantees the next pread has
	 room even after the PADDING reservation.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3446
3447 /* Read target file FILENAME. Store the result in *BUF_P and return
3448 the size of the transferred data. See the declaration in "target.h"
3449 function for more information about the return value. */
3450
3451 LONGEST
3452 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3453 {
3454 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3455 }
3456
3457 /* Read target file FILENAME. The result is NUL-terminated and
3458 returned as a string, allocated using xmalloc. If an error occurs
3459 or the transfer is unsupported, NULL is returned. Empty objects
3460 are returned as allocated but empty strings. A warning is issued
3461 if the result contains any embedded NUL bytes. */
3462
3463 char *
3464 target_fileio_read_stralloc (const char *filename)
3465 {
3466 gdb_byte *buffer;
3467 char *bufstr;
3468 LONGEST i, transferred;
3469
3470 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3471 bufstr = (char *) buffer;
3472
3473 if (transferred < 0)
3474 return NULL;
3475
3476 if (transferred == 0)
3477 return xstrdup ("");
3478
3479 bufstr[transferred] = 0;
3480
3481 /* Check for embedded NUL bytes; but allow trailing NULs. */
3482 for (i = strlen (bufstr); i < transferred; i++)
3483 if (bufstr[i] != 0)
3484 {
3485 warning (_("target file %s "
3486 "contained unexpected null characters"),
3487 filename);
3488 break;
3489 }
3490
3491 return bufstr;
3492 }
3493
3494
3495 static int
3496 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3497 CORE_ADDR addr, int len)
3498 {
3499 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3500 }
3501
3502 static int
3503 default_watchpoint_addr_within_range (struct target_ops *target,
3504 CORE_ADDR addr,
3505 CORE_ADDR start, int length)
3506 {
3507 return addr >= start && addr < start + length;
3508 }
3509
3510 static struct gdbarch *
3511 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3512 {
3513 return target_gdbarch ();
3514 }
3515
/* Stub method that always answers 0.  */

static int
return_zero (void)
{
  const int result = 0;

  return result;
}
3521
/* Stub method that always answers -1.  */

static int
return_minus_one (void)
{
  const int result = -1;

  return result;
}
3527
/* Stub method that always answers a null pointer.  */

static void *
return_null (void)
{
  void *result = 0;

  return result;
}
3533
3534 /*
3535 * Find the next target down the stack from the specified target.
3536 */
3537
3538 struct target_ops *
3539 find_target_beneath (struct target_ops *t)
3540 {
3541 return t->beneath;
3542 }
3543
3544 /* See target.h. */
3545
3546 struct target_ops *
3547 find_target_at (enum strata stratum)
3548 {
3549 struct target_ops *t;
3550
3551 for (t = current_target.beneath; t != NULL; t = t->beneath)
3552 if (t->to_stratum == stratum)
3553 return t;
3554
3555 return NULL;
3556 }
3557
3558 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first: the teardown calls below must
     not see a stale "current thread".  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Any cached register contents are now meaningless.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give the interface (e.g. Insight) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3593 \f
3594 /* Convert a normal process ID to a string. Returns the string in a
3595 static buffer. */
3596
3597 char *
3598 normal_pid_to_str (ptid_t ptid)
3599 {
3600 static char buf[32];
3601
3602 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3603 return buf;
3604 }
3605
3606 static char *
3607 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3608 {
3609 return normal_pid_to_str (ptid);
3610 }
3611
3612 /* Error-catcher for target_find_memory_regions. */
3613 static int
3614 dummy_find_memory_regions (struct target_ops *self,
3615 find_memory_region_ftype ignore1, void *ignore2)
3616 {
3617 error (_("Command not implemented for this target."));
3618 return 0;
3619 }
3620
3621 /* Error-catcher for target_make_corefile_notes. */
3622 static char *
3623 dummy_make_corefile_notes (struct target_ops *self,
3624 bfd *ignore1, int *ignore2)
3625 {
3626 error (_("Command not implemented for this target."));
3627 return NULL;
3628 }
3629
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack
   and answers "no" / "not supported" for everything it implements.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* Creating/running an inferior falls back to the default run-target
     lookup machinery.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* The has_* predicates all answer 0.  The casts adapt the generic
     return_zero stub to each slot's exact function-pointer type.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining NULL method slot with a generated default.  */
  install_dummy_methods (&dummy_target);
}
3655 \f
3656 static void
3657 debug_to_open (char *args, int from_tty)
3658 {
3659 debug_target.to_open (args, from_tty);
3660
3661 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3662 }
3663
3664 void
3665 target_close (struct target_ops *targ)
3666 {
3667 gdb_assert (!target_is_pushed (targ));
3668
3669 if (targ->to_xclose != NULL)
3670 targ->to_xclose (targ);
3671 else if (targ->to_close != NULL)
3672 targ->to_close (targ);
3673
3674 if (targetdebug)
3675 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3676 }
3677
3678 void
3679 target_attach (char *args, int from_tty)
3680 {
3681 current_target.to_attach (&current_target, args, from_tty);
3682 if (targetdebug)
3683 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3684 args, from_tty);
3685 }
3686
3687 int
3688 target_thread_alive (ptid_t ptid)
3689 {
3690 struct target_ops *t;
3691
3692 for (t = current_target.beneath; t != NULL; t = t->beneath)
3693 {
3694 if (t->to_thread_alive != NULL)
3695 {
3696 int retval;
3697
3698 retval = t->to_thread_alive (t, ptid);
3699 if (targetdebug)
3700 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3701 ptid_get_pid (ptid), retval);
3702
3703 return retval;
3704 }
3705 }
3706
3707 return 0;
3708 }
3709
3710 void
3711 target_find_new_threads (void)
3712 {
3713 struct target_ops *t;
3714
3715 for (t = current_target.beneath; t != NULL; t = t->beneath)
3716 {
3717 if (t->to_find_new_threads != NULL)
3718 {
3719 t->to_find_new_threads (t);
3720 if (targetdebug)
3721 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3722
3723 return;
3724 }
3725 }
3726 }
3727
3728 void
3729 target_stop (ptid_t ptid)
3730 {
3731 if (!may_stop)
3732 {
3733 warning (_("May not interrupt or stop the target, ignoring attempt"));
3734 return;
3735 }
3736
3737 (*current_target.to_stop) (&current_target, ptid);
3738 }
3739
3740 static void
3741 debug_to_post_attach (struct target_ops *self, int pid)
3742 {
3743 debug_target.to_post_attach (&debug_target, pid);
3744
3745 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3746 }
3747
3748 /* Concatenate ELEM to LIST, a comma separate list, and return the
3749 result. The LIST incoming argument is released. */
3750
3751 static char *
3752 str_comma_list_concat_elem (char *list, const char *elem)
3753 {
3754 if (list == NULL)
3755 return xstrdup (elem);
3756 else
3757 return reconcat (list, list, ", ", elem, (char *) NULL);
3758 }
3759
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (!present)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3777
3778 char *
3779 target_options_to_string (int target_options)
3780 {
3781 char *ret = NULL;
3782
3783 #define DO_TARG_OPTION(OPT) \
3784 ret = do_option (&target_options, ret, OPT, #OPT)
3785
3786 DO_TARG_OPTION (TARGET_WNOHANG);
3787
3788 if (target_options != 0)
3789 ret = str_comma_list_concat_elem (ret, "unknown???");
3790
3791 if (ret == NULL)
3792 ret = xstrdup ("");
3793 return ret;
3794 }
3795
/* Log a register access for FUNC to gdb_stdlog: the register's name
   (or number if it has no name), its raw bytes, and - when it fits in
   a LONGEST - its value as both an address and a decimal integer.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw registers have contents we can dump.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw hex dump, one byte at a time, in target byte order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* If the value fits in a LONGEST, also show it decoded.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3832
3833 void
3834 target_fetch_registers (struct regcache *regcache, int regno)
3835 {
3836 struct target_ops *t;
3837
3838 for (t = current_target.beneath; t != NULL; t = t->beneath)
3839 {
3840 if (t->to_fetch_registers != NULL)
3841 {
3842 t->to_fetch_registers (t, regcache, regno);
3843 if (targetdebug)
3844 debug_print_register ("target_fetch_registers", regcache, regno);
3845 return;
3846 }
3847 }
3848 }
3849
3850 void
3851 target_store_registers (struct regcache *regcache, int regno)
3852 {
3853 struct target_ops *t;
3854
3855 if (!may_write_registers)
3856 error (_("Writing to registers is not allowed (regno %d)"), regno);
3857
3858 current_target.to_store_registers (&current_target, regcache, regno);
3859 if (targetdebug)
3860 {
3861 debug_print_register ("target_store_registers", regcache, regno);
3862 }
3863 }
3864
3865 int
3866 target_core_of_thread (ptid_t ptid)
3867 {
3868 struct target_ops *t;
3869
3870 for (t = current_target.beneath; t != NULL; t = t->beneath)
3871 {
3872 if (t->to_core_of_thread != NULL)
3873 {
3874 int retval = t->to_core_of_thread (t, ptid);
3875
3876 if (targetdebug)
3877 fprintf_unfiltered (gdb_stdlog,
3878 "target_core_of_thread (%d) = %d\n",
3879 ptid_get_pid (ptid), retval);
3880 return retval;
3881 }
3882 }
3883
3884 return -1;
3885 }
3886
3887 int
3888 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3889 {
3890 struct target_ops *t;
3891
3892 for (t = current_target.beneath; t != NULL; t = t->beneath)
3893 {
3894 if (t->to_verify_memory != NULL)
3895 {
3896 int retval = t->to_verify_memory (t, data, memaddr, size);
3897
3898 if (targetdebug)
3899 fprintf_unfiltered (gdb_stdlog,
3900 "target_verify_memory (%s, %s) = %d\n",
3901 paddress (target_gdbarch (), memaddr),
3902 pulongest (size),
3903 retval);
3904 return retval;
3905 }
3906 }
3907
3908 tcomplain ();
3909 }
3910
3911 /* The documentation for this function is in its prototype declaration in
3912 target.h. */
3913
3914 int
3915 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3916 {
3917 struct target_ops *t;
3918
3919 for (t = current_target.beneath; t != NULL; t = t->beneath)
3920 if (t->to_insert_mask_watchpoint != NULL)
3921 {
3922 int ret;
3923
3924 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3925
3926 if (targetdebug)
3927 fprintf_unfiltered (gdb_stdlog, "\
3928 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3929 core_addr_to_string (addr),
3930 core_addr_to_string (mask), rw, ret);
3931
3932 return ret;
3933 }
3934
3935 return 1;
3936 }
3937
3938 /* The documentation for this function is in its prototype declaration in
3939 target.h. */
3940
3941 int
3942 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3943 {
3944 struct target_ops *t;
3945
3946 for (t = current_target.beneath; t != NULL; t = t->beneath)
3947 if (t->to_remove_mask_watchpoint != NULL)
3948 {
3949 int ret;
3950
3951 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3952
3953 if (targetdebug)
3954 fprintf_unfiltered (gdb_stdlog, "\
3955 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3956 core_addr_to_string (addr),
3957 core_addr_to_string (mask), rw, ret);
3958
3959 return ret;
3960 }
3961
3962 return 1;
3963 }
3964
3965 /* The documentation for this function is in its prototype declaration
3966 in target.h. */
3967
3968 int
3969 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3970 {
3971 struct target_ops *t;
3972
3973 for (t = current_target.beneath; t != NULL; t = t->beneath)
3974 if (t->to_masked_watch_num_registers != NULL)
3975 return t->to_masked_watch_num_registers (t, addr, mask);
3976
3977 return -1;
3978 }
3979
3980 /* The documentation for this function is in its prototype declaration
3981 in target.h. */
3982
3983 int
3984 target_ranged_break_num_registers (void)
3985 {
3986 struct target_ops *t;
3987
3988 for (t = current_target.beneath; t != NULL; t = t->beneath)
3989 if (t->to_ranged_break_num_registers != NULL)
3990 return t->to_ranged_break_num_registers (t);
3991
3992 return -1;
3993 }
3994
3995 /* See target.h. */
3996
3997 struct btrace_target_info *
3998 target_enable_btrace (ptid_t ptid)
3999 {
4000 struct target_ops *t;
4001
4002 for (t = current_target.beneath; t != NULL; t = t->beneath)
4003 if (t->to_enable_btrace != NULL)
4004 return t->to_enable_btrace (t, ptid);
4005
4006 tcomplain ();
4007 return NULL;
4008 }
4009
4010 /* See target.h. */
4011
4012 void
4013 target_disable_btrace (struct btrace_target_info *btinfo)
4014 {
4015 struct target_ops *t;
4016
4017 for (t = current_target.beneath; t != NULL; t = t->beneath)
4018 if (t->to_disable_btrace != NULL)
4019 {
4020 t->to_disable_btrace (t, btinfo);
4021 return;
4022 }
4023
4024 tcomplain ();
4025 }
4026
4027 /* See target.h. */
4028
4029 void
4030 target_teardown_btrace (struct btrace_target_info *btinfo)
4031 {
4032 struct target_ops *t;
4033
4034 for (t = current_target.beneath; t != NULL; t = t->beneath)
4035 if (t->to_teardown_btrace != NULL)
4036 {
4037 t->to_teardown_btrace (t, btinfo);
4038 return;
4039 }
4040
4041 tcomplain ();
4042 }
4043
4044 /* See target.h. */
4045
4046 enum btrace_error
4047 target_read_btrace (VEC (btrace_block_s) **btrace,
4048 struct btrace_target_info *btinfo,
4049 enum btrace_read_type type)
4050 {
4051 struct target_ops *t;
4052
4053 for (t = current_target.beneath; t != NULL; t = t->beneath)
4054 if (t->to_read_btrace != NULL)
4055 return t->to_read_btrace (t, btrace, btinfo, type);
4056
4057 tcomplain ();
4058 return BTRACE_ERR_NOT_SUPPORTED;
4059 }
4060
4061 /* See target.h. */
4062
4063 void
4064 target_stop_recording (void)
4065 {
4066 struct target_ops *t;
4067
4068 for (t = current_target.beneath; t != NULL; t = t->beneath)
4069 if (t->to_stop_recording != NULL)
4070 {
4071 t->to_stop_recording (t);
4072 return;
4073 }
4074
4075 /* This is optional. */
4076 }
4077
4078 /* See target.h. */
4079
4080 void
4081 target_info_record (void)
4082 {
4083 struct target_ops *t;
4084
4085 for (t = current_target.beneath; t != NULL; t = t->beneath)
4086 if (t->to_info_record != NULL)
4087 {
4088 t->to_info_record (t);
4089 return;
4090 }
4091
4092 tcomplain ();
4093 }
4094
4095 /* See target.h. */
4096
4097 void
4098 target_save_record (const char *filename)
4099 {
4100 struct target_ops *t;
4101
4102 for (t = current_target.beneath; t != NULL; t = t->beneath)
4103 if (t->to_save_record != NULL)
4104 {
4105 t->to_save_record (t, filename);
4106 return;
4107 }
4108
4109 tcomplain ();
4110 }
4111
4112 /* See target.h. */
4113
4114 int
4115 target_supports_delete_record (void)
4116 {
4117 struct target_ops *t;
4118
4119 for (t = current_target.beneath; t != NULL; t = t->beneath)
4120 if (t->to_delete_record != NULL)
4121 return 1;
4122
4123 return 0;
4124 }
4125
4126 /* See target.h. */
4127
4128 void
4129 target_delete_record (void)
4130 {
4131 struct target_ops *t;
4132
4133 for (t = current_target.beneath; t != NULL; t = t->beneath)
4134 if (t->to_delete_record != NULL)
4135 {
4136 t->to_delete_record (t);
4137 return;
4138 }
4139
4140 tcomplain ();
4141 }
4142
4143 /* See target.h. */
4144
4145 int
4146 target_record_is_replaying (void)
4147 {
4148 struct target_ops *t;
4149
4150 for (t = current_target.beneath; t != NULL; t = t->beneath)
4151 if (t->to_record_is_replaying != NULL)
4152 return t->to_record_is_replaying (t);
4153
4154 return 0;
4155 }
4156
4157 /* See target.h. */
4158
4159 void
4160 target_goto_record_begin (void)
4161 {
4162 struct target_ops *t;
4163
4164 for (t = current_target.beneath; t != NULL; t = t->beneath)
4165 if (t->to_goto_record_begin != NULL)
4166 {
4167 t->to_goto_record_begin (t);
4168 return;
4169 }
4170
4171 tcomplain ();
4172 }
4173
4174 /* See target.h. */
4175
4176 void
4177 target_goto_record_end (void)
4178 {
4179 struct target_ops *t;
4180
4181 for (t = current_target.beneath; t != NULL; t = t->beneath)
4182 if (t->to_goto_record_end != NULL)
4183 {
4184 t->to_goto_record_end (t);
4185 return;
4186 }
4187
4188 tcomplain ();
4189 }
4190
4191 /* See target.h. */
4192
4193 void
4194 target_goto_record (ULONGEST insn)
4195 {
4196 struct target_ops *t;
4197
4198 for (t = current_target.beneath; t != NULL; t = t->beneath)
4199 if (t->to_goto_record != NULL)
4200 {
4201 t->to_goto_record (t, insn);
4202 return;
4203 }
4204
4205 tcomplain ();
4206 }
4207
4208 /* See target.h. */
4209
4210 void
4211 target_insn_history (int size, int flags)
4212 {
4213 struct target_ops *t;
4214
4215 for (t = current_target.beneath; t != NULL; t = t->beneath)
4216 if (t->to_insn_history != NULL)
4217 {
4218 t->to_insn_history (t, size, flags);
4219 return;
4220 }
4221
4222 tcomplain ();
4223 }
4224
4225 /* See target.h. */
4226
4227 void
4228 target_insn_history_from (ULONGEST from, int size, int flags)
4229 {
4230 struct target_ops *t;
4231
4232 for (t = current_target.beneath; t != NULL; t = t->beneath)
4233 if (t->to_insn_history_from != NULL)
4234 {
4235 t->to_insn_history_from (t, from, size, flags);
4236 return;
4237 }
4238
4239 tcomplain ();
4240 }
4241
4242 /* See target.h. */
4243
4244 void
4245 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4246 {
4247 struct target_ops *t;
4248
4249 for (t = current_target.beneath; t != NULL; t = t->beneath)
4250 if (t->to_insn_history_range != NULL)
4251 {
4252 t->to_insn_history_range (t, begin, end, flags);
4253 return;
4254 }
4255
4256 tcomplain ();
4257 }
4258
4259 /* See target.h. */
4260
4261 void
4262 target_call_history (int size, int flags)
4263 {
4264 struct target_ops *t;
4265
4266 for (t = current_target.beneath; t != NULL; t = t->beneath)
4267 if (t->to_call_history != NULL)
4268 {
4269 t->to_call_history (t, size, flags);
4270 return;
4271 }
4272
4273 tcomplain ();
4274 }
4275
4276 /* See target.h. */
4277
4278 void
4279 target_call_history_from (ULONGEST begin, int size, int flags)
4280 {
4281 struct target_ops *t;
4282
4283 for (t = current_target.beneath; t != NULL; t = t->beneath)
4284 if (t->to_call_history_from != NULL)
4285 {
4286 t->to_call_history_from (t, begin, size, flags);
4287 return;
4288 }
4289
4290 tcomplain ();
4291 }
4292
4293 /* See target.h. */
4294
4295 void
4296 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4297 {
4298 struct target_ops *t;
4299
4300 for (t = current_target.beneath; t != NULL; t = t->beneath)
4301 if (t->to_call_history_range != NULL)
4302 {
4303 t->to_call_history_range (t, begin, end, flags);
4304 return;
4305 }
4306
4307 tcomplain ();
4308 }
4309
4310 static void
4311 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4312 {
4313 debug_target.to_prepare_to_store (&debug_target, regcache);
4314
4315 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4316 }
4317
4318 /* See target.h. */
4319
4320 const struct frame_unwind *
4321 target_get_unwinder (void)
4322 {
4323 struct target_ops *t;
4324
4325 for (t = current_target.beneath; t != NULL; t = t->beneath)
4326 if (t->to_get_unwinder != NULL)
4327 return t->to_get_unwinder;
4328
4329 return NULL;
4330 }
4331
4332 /* See target.h. */
4333
4334 const struct frame_unwind *
4335 target_get_tailcall_unwinder (void)
4336 {
4337 struct target_ops *t;
4338
4339 for (t = current_target.beneath; t != NULL; t = t->beneath)
4340 if (t->to_get_tailcall_unwinder != NULL)
4341 return t->to_get_tailcall_unwinder;
4342
4343 return NULL;
4344 }
4345
4346 /* See target.h. */
4347
4348 CORE_ADDR
4349 forward_target_decr_pc_after_break (struct target_ops *ops,
4350 struct gdbarch *gdbarch)
4351 {
4352 for (; ops != NULL; ops = ops->beneath)
4353 if (ops->to_decr_pc_after_break != NULL)
4354 return ops->to_decr_pc_after_break (ops, gdbarch);
4355
4356 return gdbarch_decr_pc_after_break (gdbarch);
4357 }
4358
4359 /* See target.h. */
4360
4361 CORE_ADDR
4362 target_decr_pc_after_break (struct gdbarch *gdbarch)
4363 {
4364 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4365 }
4366
/* Debug wrapper for the deprecated memory-transfer method: forward the
   request, then log the call and (for successful transfers) a hex dump
   of the bytes moved.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump at each 16-byte boundary of the buffer's
	     host address.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity 1, dump only the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4407
4408 static void
4409 debug_to_files_info (struct target_ops *target)
4410 {
4411 debug_target.to_files_info (target);
4412
4413 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4414 }
4415
4416 static int
4417 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4418 struct bp_target_info *bp_tgt)
4419 {
4420 int retval;
4421
4422 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4423
4424 fprintf_unfiltered (gdb_stdlog,
4425 "target_insert_breakpoint (%s, xxx) = %ld\n",
4426 core_addr_to_string (bp_tgt->placed_address),
4427 (unsigned long) retval);
4428 return retval;
4429 }
4430
4431 static int
4432 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4433 struct bp_target_info *bp_tgt)
4434 {
4435 int retval;
4436
4437 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4438
4439 fprintf_unfiltered (gdb_stdlog,
4440 "target_remove_breakpoint (%s, xxx) = %ld\n",
4441 core_addr_to_string (bp_tgt->placed_address),
4442 (unsigned long) retval);
4443 return retval;
4444 }
4445
4446 static int
4447 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4448 int type, int cnt, int from_tty)
4449 {
4450 int retval;
4451
4452 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4453 type, cnt, from_tty);
4454
4455 fprintf_unfiltered (gdb_stdlog,
4456 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4457 (unsigned long) type,
4458 (unsigned long) cnt,
4459 (unsigned long) from_tty,
4460 (unsigned long) retval);
4461 return retval;
4462 }
4463
4464 static int
4465 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4466 CORE_ADDR addr, int len)
4467 {
4468 CORE_ADDR retval;
4469
4470 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4471 addr, len);
4472
4473 fprintf_unfiltered (gdb_stdlog,
4474 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4475 core_addr_to_string (addr), (unsigned long) len,
4476 core_addr_to_string (retval));
4477 return retval;
4478 }
4479
4480 static int
4481 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4482 CORE_ADDR addr, int len, int rw,
4483 struct expression *cond)
4484 {
4485 int retval;
4486
4487 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4488 addr, len,
4489 rw, cond);
4490
4491 fprintf_unfiltered (gdb_stdlog,
4492 "target_can_accel_watchpoint_condition "
4493 "(%s, %d, %d, %s) = %ld\n",
4494 core_addr_to_string (addr), len, rw,
4495 host_address_to_string (cond), (unsigned long) retval);
4496 return retval;
4497 }
4498
4499 static int
4500 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4501 {
4502 int retval;
4503
4504 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4505
4506 fprintf_unfiltered (gdb_stdlog,
4507 "target_stopped_by_watchpoint () = %ld\n",
4508 (unsigned long) retval);
4509 return retval;
4510 }
4511
4512 static int
4513 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4514 {
4515 int retval;
4516
4517 retval = debug_target.to_stopped_data_address (target, addr);
4518
4519 fprintf_unfiltered (gdb_stdlog,
4520 "target_stopped_data_address ([%s]) = %ld\n",
4521 core_addr_to_string (*addr),
4522 (unsigned long)retval);
4523 return retval;
4524 }
4525
4526 static int
4527 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4528 CORE_ADDR addr,
4529 CORE_ADDR start, int length)
4530 {
4531 int retval;
4532
4533 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4534 start, length);
4535
4536 fprintf_filtered (gdb_stdlog,
4537 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4538 core_addr_to_string (addr), core_addr_to_string (start),
4539 length, retval);
4540 return retval;
4541 }
4542
4543 static int
4544 debug_to_insert_hw_breakpoint (struct target_ops *self,
4545 struct gdbarch *gdbarch,
4546 struct bp_target_info *bp_tgt)
4547 {
4548 int retval;
4549
4550 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4551 gdbarch, bp_tgt);
4552
4553 fprintf_unfiltered (gdb_stdlog,
4554 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4555 core_addr_to_string (bp_tgt->placed_address),
4556 (unsigned long) retval);
4557 return retval;
4558 }
4559
4560 static int
4561 debug_to_remove_hw_breakpoint (struct target_ops *self,
4562 struct gdbarch *gdbarch,
4563 struct bp_target_info *bp_tgt)
4564 {
4565 int retval;
4566
4567 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4568 gdbarch, bp_tgt);
4569
4570 fprintf_unfiltered (gdb_stdlog,
4571 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4572 core_addr_to_string (bp_tgt->placed_address),
4573 (unsigned long) retval);
4574 return retval;
4575 }
4576
4577 static int
4578 debug_to_insert_watchpoint (struct target_ops *self,
4579 CORE_ADDR addr, int len, int type,
4580 struct expression *cond)
4581 {
4582 int retval;
4583
4584 retval = debug_target.to_insert_watchpoint (&debug_target,
4585 addr, len, type, cond);
4586
4587 fprintf_unfiltered (gdb_stdlog,
4588 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4589 core_addr_to_string (addr), len, type,
4590 host_address_to_string (cond), (unsigned long) retval);
4591 return retval;
4592 }
4593
4594 static int
4595 debug_to_remove_watchpoint (struct target_ops *self,
4596 CORE_ADDR addr, int len, int type,
4597 struct expression *cond)
4598 {
4599 int retval;
4600
4601 retval = debug_target.to_remove_watchpoint (&debug_target,
4602 addr, len, type, cond);
4603
4604 fprintf_unfiltered (gdb_stdlog,
4605 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4606 core_addr_to_string (addr), len, type,
4607 host_address_to_string (cond), (unsigned long) retval);
4608 return retval;
4609 }
4610
4611 static void
4612 debug_to_terminal_init (struct target_ops *self)
4613 {
4614 debug_target.to_terminal_init (&debug_target);
4615
4616 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4617 }
4618
4619 static void
4620 debug_to_terminal_inferior (struct target_ops *self)
4621 {
4622 debug_target.to_terminal_inferior (&debug_target);
4623
4624 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4625 }
4626
4627 static void
4628 debug_to_terminal_ours_for_output (struct target_ops *self)
4629 {
4630 debug_target.to_terminal_ours_for_output (&debug_target);
4631
4632 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4633 }
4634
4635 static void
4636 debug_to_terminal_ours (struct target_ops *self)
4637 {
4638 debug_target.to_terminal_ours (&debug_target);
4639
4640 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4641 }
4642
4643 static void
4644 debug_to_terminal_save_ours (struct target_ops *self)
4645 {
4646 debug_target.to_terminal_save_ours (&debug_target);
4647
4648 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4649 }
4650
4651 static void
4652 debug_to_terminal_info (struct target_ops *self,
4653 const char *arg, int from_tty)
4654 {
4655 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4656
4657 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4658 from_tty);
4659 }
4660
4661 static void
4662 debug_to_load (struct target_ops *self, char *args, int from_tty)
4663 {
4664 debug_target.to_load (&debug_target, args, from_tty);
4665
4666 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4667 }
4668
4669 static void
4670 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4671 {
4672 debug_target.to_post_startup_inferior (&debug_target, ptid);
4673
4674 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4675 ptid_get_pid (ptid));
4676 }
4677
4678 static int
4679 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4680 {
4681 int retval;
4682
4683 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4684
4685 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4686 pid, retval);
4687
4688 return retval;
4689 }
4690
4691 static int
4692 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4693 {
4694 int retval;
4695
4696 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4697
4698 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4699 pid, retval);
4700
4701 return retval;
4702 }
4703
4704 static int
4705 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4706 {
4707 int retval;
4708
4709 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4710
4711 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4712 pid, retval);
4713
4714 return retval;
4715 }
4716
4717 static int
4718 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4719 {
4720 int retval;
4721
4722 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4725 pid, retval);
4726
4727 return retval;
4728 }
4729
4730 static int
4731 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4732 {
4733 int retval;
4734
4735 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4736
4737 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4738 pid, retval);
4739
4740 return retval;
4741 }
4742
4743 static int
4744 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4745 {
4746 int retval;
4747
4748 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4749
4750 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4751 pid, retval);
4752
4753 return retval;
4754 }
4755
4756 static int
4757 debug_to_has_exited (struct target_ops *self,
4758 int pid, int wait_status, int *exit_status)
4759 {
4760 int has_exited;
4761
4762 has_exited = debug_target.to_has_exited (&debug_target,
4763 pid, wait_status, exit_status);
4764
4765 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4766 pid, wait_status, *exit_status, has_exited);
4767
4768 return has_exited;
4769 }
4770
4771 static int
4772 debug_to_can_run (struct target_ops *self)
4773 {
4774 int retval;
4775
4776 retval = debug_target.to_can_run (&debug_target);
4777
4778 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4779
4780 return retval;
4781 }
4782
4783 static struct gdbarch *
4784 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4785 {
4786 struct gdbarch *retval;
4787
4788 retval = debug_target.to_thread_architecture (ops, ptid);
4789
4790 fprintf_unfiltered (gdb_stdlog,
4791 "target_thread_architecture (%s) = %s [%s]\n",
4792 target_pid_to_str (ptid),
4793 host_address_to_string (retval),
4794 gdbarch_bfd_arch_info (retval)->printable_name);
4795 return retval;
4796 }
4797
4798 static void
4799 debug_to_stop (struct target_ops *self, ptid_t ptid)
4800 {
4801 debug_target.to_stop (&debug_target, ptid);
4802
4803 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4804 target_pid_to_str (ptid));
4805 }
4806
4807 static void
4808 debug_to_rcmd (struct target_ops *self, char *command,
4809 struct ui_file *outbuf)
4810 {
4811 debug_target.to_rcmd (&debug_target, command, outbuf);
4812 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4813 }
4814
4815 static char *
4816 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4817 {
4818 char *exec_file;
4819
4820 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4821
4822 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4823 pid, exec_file);
4824
4825 return exec_file;
4826 }
4827
/* Install the debug wrappers: snapshot the current target vector into
   DEBUG_TARGET, then overwrite selected methods of CURRENT_TARGET
   with the debug_to_* logging versions, each of which forwards to the
   saved copy.  Called when "set debug target" turns logging on.  */

static void
setup_target_debug (void)
{
  /* Save the real vector first; every wrapper below delegates to it.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4875 \f
4876
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4881
/* Default implementation of the to_rcmd method: targets without a
   remote monitor simply report the command as unsupported.  Note
   error() does not return.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4887
/* Implement the "monitor" command: forward CMD verbatim to the
   current target via target_rcmd, with any response written to
   gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4894
4895 /* Print the name of each layers of our target stack. */
4896
4897 static void
4898 maintenance_print_target_stack (char *cmd, int from_tty)
4899 {
4900 struct target_ops *t;
4901
4902 printf_filtered (_("The current target stack is:\n"));
4903
4904 for (t = target_stack; t != NULL; t = t->beneath)
4905 {
4906 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4907 }
4908 }
4909
/* Controls if async mode is permitted.  This is the value the rest of
   GDB consults.  */
int target_async_permitted = 0;

/* The "set target-async" command writes to this variable.  If the
   inferior is executing, target_async_permitted is *not* updated (see
   set_target_async_command, which copies it over only when safe).  */
static int target_async_permitted_1 = 0;
4916
4917 static void
4918 set_target_async_command (char *args, int from_tty,
4919 struct cmd_list_element *c)
4920 {
4921 if (have_live_inferiors ())
4922 {
4923 target_async_permitted_1 = target_async_permitted;
4924 error (_("Cannot change this setting while the inferior is running."));
4925 }
4926
4927 target_async_permitted = target_async_permitted_1;
4928 }
4929
/* Show hook for "show target-async": report the current setting
   (VALUE is the printable form supplied by the command machinery).  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4939
/* Temporary copies of permission settings.  The "set may-*" commands
   write into these staging variables; the real may_* flags are only
   updated from them when the inferior is not executing (see
   set_target_permissions / set_write_memory_permission below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4948
/* Make the user-set (staged) values match the real values again, e.g.
   after a rejected change while the inferior was running.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4961
/* The one function handles (most of) the permission flags in the same
   way: refuse the change while the inferior is executing, otherwise
   copy the staged *_1 values into the live flags.  may_write_memory
   is intentionally not handled here -- it has its own setter,
   set_write_memory_permission, which works independently of observer
   mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged values; error() does not return.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4983
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse the change while the
   inferior is executing.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4994
4995
/* Module initialization: push the dummy target as the permanent
   bottom of the target stack and register all target-related
   commands and set/show variables.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack forever.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same thing.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  /* "set/show debug target" -- controls the debug_to_* wrappers.  */
  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* Writes go to the staging variable; see set_target_async_command.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags; each writes to its *_1 staging variable and is
     committed by set_target_permissions (or, for memory writes,
     set_write_memory_permission).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.138625 seconds and 4 git commands to generate.