Add target_xfer_partial_ftype
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 void target_ignore (void);
68
69 static void target_command (char *, int);
70
71 static struct target_ops *find_default_run_target (char *);
72
73 static target_xfer_partial_ftype default_xfer_partial;
74
75 static target_xfer_partial_ftype current_xfer_partial;
76
77 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
78 ptid_t ptid);
79
80 static void init_dummy_target (void);
81
82 static struct target_ops debug_target;
83
84 static void debug_to_open (char *, int);
85
86 static void debug_to_prepare_to_store (struct regcache *);
87
88 static void debug_to_files_info (struct target_ops *);
89
90 static int debug_to_insert_breakpoint (struct gdbarch *,
91 struct bp_target_info *);
92
93 static int debug_to_remove_breakpoint (struct gdbarch *,
94 struct bp_target_info *);
95
96 static int debug_to_can_use_hw_breakpoint (int, int, int);
97
98 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
105 struct expression *);
106
107 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
108 struct expression *);
109
110 static int debug_to_stopped_by_watchpoint (void);
111
112 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
113
114 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
115 CORE_ADDR, CORE_ADDR, int);
116
117 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
118
119 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
120 struct expression *);
121
122 static void debug_to_terminal_init (void);
123
124 static void debug_to_terminal_inferior (void);
125
126 static void debug_to_terminal_ours_for_output (void);
127
128 static void debug_to_terminal_save_ours (void);
129
130 static void debug_to_terminal_ours (void);
131
132 static void debug_to_load (char *, int);
133
134 static int debug_to_can_run (void);
135
136 static void debug_to_stop (ptid_t);
137
138 /* Pointer to array of target architecture structures; the number of
139 entries currently in use in that array; and the allocated size of
140 the array. */
141 struct target_ops **target_structs;
142 unsigned target_struct_size;
143 unsigned target_struct_allocsize;
144 #define DEFAULT_ALLOCSIZE 10
145
146 /* The initial current target, so that there is always a semi-valid
147 current target. */
148
149 static struct target_ops dummy_target;
150
151 /* Top of target stack. */
152
153 static struct target_ops *target_stack;
154
155 /* The target structure we are currently using to talk to a process
156 or file or whatever "inferior" we have. */
157
158 struct target_ops current_target;
159
160 /* Command list for target. */
161
162 static struct cmd_list_element *targetlist = NULL;
163
164 /* Nonzero if we should trust readonly sections from the
165 executable when reading memory. */
166
167 static int trust_readonly = 0;
168
169 /* Nonzero if we should show true memory content including
170 memory breakpoints inserted by GDB. */
171
172 static int show_memory_breakpoints = 0;
173
174 /* These globals control whether GDB attempts to perform these
175 operations; they are useful for targets that need to prevent
176 inadvertent disruption, such as in non-stop mode. */
177
178 int may_write_registers = 1;
179
180 int may_write_memory = 1;
181
182 int may_insert_breakpoints = 1;
183
184 int may_insert_tracepoints = 1;
185
186 int may_insert_fast_tracepoints = 1;
187
188 int may_stop = 1;
189
190 /* Non-zero if we want to see trace of target level stuff. */
191
192 static unsigned int targetdebug = 0;
193 static void
194 show_targetdebug (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
196 {
197 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
198 }
199
200 static void setup_target_debug (void);
201
202 /* The user just typed 'target' without the name of a target. */
203
204 static void
205 target_command (char *arg, int from_tty)
206 {
207 fputs_filtered ("Argument required (target name). Try `help target'\n",
208 gdb_stdout);
209 }
210
211 /* Default target_has_* methods for process_stratum targets. */
212
213 int
214 default_child_has_all_memory (struct target_ops *ops)
215 {
216 /* If no inferior selected, then we can't read memory here. */
217 if (ptid_equal (inferior_ptid, null_ptid))
218 return 0;
219
220 return 1;
221 }
222
223 int
224 default_child_has_memory (struct target_ops *ops)
225 {
226 /* If no inferior selected, then we can't read memory here. */
227 if (ptid_equal (inferior_ptid, null_ptid))
228 return 0;
229
230 return 1;
231 }
232
233 int
234 default_child_has_stack (struct target_ops *ops)
235 {
236 /* If no inferior selected, there's no stack. */
237 if (ptid_equal (inferior_ptid, null_ptid))
238 return 0;
239
240 return 1;
241 }
242
243 int
244 default_child_has_registers (struct target_ops *ops)
245 {
246 /* Can't read registers from no inferior. */
247 if (ptid_equal (inferior_ptid, null_ptid))
248 return 0;
249
250 return 1;
251 }
252
253 int
254 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
255 {
256 /* If there's no thread selected, then we can't make it run through
257 hoops. */
258 if (ptid_equal (the_ptid, null_ptid))
259 return 0;
260
261 return 1;
262 }
263
264
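/* Illustrative sketch only (not part of the original file): a
   process_stratum target would typically point its target_ops "has"
   hooks at the defaults above.  The vector name, init function name
   and strings below are assumptions made for the example.  */

static struct target_ops hypothetical_child_ops;

static void
init_hypothetical_child_ops (void)
{
  hypothetical_child_ops.to_shortname = "hypothetical-child";
  hypothetical_child_ops.to_longname = "Hypothetical child process target";
  hypothetical_child_ops.to_stratum = process_stratum;
  hypothetical_child_ops.to_has_all_memory = default_child_has_all_memory;
  hypothetical_child_ops.to_has_memory = default_child_has_memory;
  hypothetical_child_ops.to_has_stack = default_child_has_stack;
  hypothetical_child_ops.to_has_registers = default_child_has_registers;
  hypothetical_child_ops.to_has_execution = default_child_has_execution;
  hypothetical_child_ops.to_magic = OPS_MAGIC;
}
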
265 int
266 target_has_all_memory_1 (void)
267 {
268 struct target_ops *t;
269
270 for (t = current_target.beneath; t != NULL; t = t->beneath)
271 if (t->to_has_all_memory (t))
272 return 1;
273
274 return 0;
275 }
276
277 int
278 target_has_memory_1 (void)
279 {
280 struct target_ops *t;
281
282 for (t = current_target.beneath; t != NULL; t = t->beneath)
283 if (t->to_has_memory (t))
284 return 1;
285
286 return 0;
287 }
288
289 int
290 target_has_stack_1 (void)
291 {
292 struct target_ops *t;
293
294 for (t = current_target.beneath; t != NULL; t = t->beneath)
295 if (t->to_has_stack (t))
296 return 1;
297
298 return 0;
299 }
300
301 int
302 target_has_registers_1 (void)
303 {
304 struct target_ops *t;
305
306 for (t = current_target.beneath; t != NULL; t = t->beneath)
307 if (t->to_has_registers (t))
308 return 1;
309
310 return 0;
311 }
312
313 int
314 target_has_execution_1 (ptid_t the_ptid)
315 {
316 struct target_ops *t;
317
318 for (t = current_target.beneath; t != NULL; t = t->beneath)
319 if (t->to_has_execution (t, the_ptid))
320 return 1;
321
322 return 0;
323 }
324
325 int
326 target_has_execution_current (void)
327 {
328 return target_has_execution_1 (inferior_ptid);
329 }
330
331 /* Complete initialization of T. This ensures that various fields in
332 T are set, if needed by the target implementation. */
333
334 void
335 complete_target_initialization (struct target_ops *t)
336 {
337 /* Provide default values for all "must have" methods. */
338 if (t->to_xfer_partial == NULL)
339 t->to_xfer_partial = default_xfer_partial;
340
341 if (t->to_has_all_memory == NULL)
342 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
343
344 if (t->to_has_memory == NULL)
345 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
346
347 if (t->to_has_stack == NULL)
348 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
349
350 if (t->to_has_registers == NULL)
351 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
352
353 if (t->to_has_execution == NULL)
354 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
355 }
356
357 /* Add possible target architecture T to the list and add a new
358 command 'target T->to_shortname'. Set COMPLETER as the command's
359 completer if not NULL. */
360
361 void
362 add_target_with_completer (struct target_ops *t,
363 completer_ftype *completer)
364 {
365 struct cmd_list_element *c;
366
367 complete_target_initialization (t);
368
369 if (!target_structs)
370 {
371 target_struct_allocsize = DEFAULT_ALLOCSIZE;
372 target_structs = (struct target_ops **) xmalloc
373 (target_struct_allocsize * sizeof (*target_structs));
374 }
375 if (target_struct_size >= target_struct_allocsize)
376 {
377 target_struct_allocsize *= 2;
378 target_structs = (struct target_ops **)
379 xrealloc ((char *) target_structs,
380 target_struct_allocsize * sizeof (*target_structs));
381 }
382 target_structs[target_struct_size++] = t;
383
384 if (targetlist == NULL)
385 add_prefix_cmd ("target", class_run, target_command, _("\
386 Connect to a target machine or process.\n\
387 The first argument is the type or protocol of the target machine.\n\
388 Remaining arguments are interpreted by the target protocol. For more\n\
389 information on the arguments for a particular protocol, type\n\
390 `help target ' followed by the protocol name."),
391 &targetlist, "target ", 0, &cmdlist);
392 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
393 &targetlist);
394 if (completer != NULL)
395 set_cmd_completer (c, completer);
396 }
397
398 /* Add a possible target architecture to the list. */
399
400 void
401 add_target (struct target_ops *t)
402 {
403 add_target_with_completer (t, NULL);
404 }
405
406 /* See target.h. */
407
408 void
409 add_deprecated_target_alias (struct target_ops *t, char *alias)
410 {
411 struct cmd_list_element *c;
412 char *alt;
413
414 /* If we used add_alias_cmd here, we would not get the deprecated
415 warning; see PR cli/15104. */
416 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
417 alt = xstrprintf ("target %s", t->to_shortname);
418 deprecate_cmd (c, alt);
419 }
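
/* Illustrative sketch only: a target implementation's module
   initializer usually registers its vector with add_target (or
   add_target_with_completer) and may register a deprecated alias.
   The _initialize function below is an assumption for the example and
   reuses the hypothetical vector sketched earlier in this file.  */

void
_initialize_hypothetical_child (void)
{
  init_hypothetical_child_ops ();
  add_target (&hypothetical_child_ops);
  add_deprecated_target_alias (&hypothetical_child_ops, "old-hypothetical");
}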
420
421 /* Stub functions */
422
423 void
424 target_ignore (void)
425 {
426 }
427
428 void
429 target_kill (void)
430 {
431 struct target_ops *t;
432
433 for (t = current_target.beneath; t != NULL; t = t->beneath)
434 if (t->to_kill != NULL)
435 {
436 if (targetdebug)
437 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
438
439 t->to_kill (t);
440 return;
441 }
442
443 noprocess ();
444 }
445
446 void
447 target_load (char *arg, int from_tty)
448 {
449 target_dcache_invalidate ();
450 (*current_target.to_load) (arg, from_tty);
451 }
452
453 void
454 target_create_inferior (char *exec_file, char *args,
455 char **env, int from_tty)
456 {
457 struct target_ops *t;
458
459 for (t = current_target.beneath; t != NULL; t = t->beneath)
460 {
461 if (t->to_create_inferior != NULL)
462 {
463 t->to_create_inferior (t, exec_file, args, env, from_tty);
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog,
466 "target_create_inferior (%s, %s, xxx, %d)\n",
467 exec_file, args, from_tty);
468 return;
469 }
470 }
471
472 internal_error (__FILE__, __LINE__,
473 _("could not find a target to create inferior"));
474 }
475
476 void
477 target_terminal_inferior (void)
478 {
479 /* A background resume (``run&'') should leave GDB in control of the
480 terminal. Use target_can_async_p, not target_is_async_p, since at
481 this point the target is not async yet. However, if sync_execution
482 is not set, we know it will become async prior to resume. */
483 if (target_can_async_p () && !sync_execution)
484 return;
485
486 /* If GDB is resuming the inferior in the foreground, install
487 inferior's terminal modes. */
488 (*current_target.to_terminal_inferior) ();
489 }
490
491 static int
492 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
493 struct target_ops *t)
494 {
495 errno = EIO; /* Can't read/write this location. */
496 return 0; /* No bytes handled. */
497 }
498
499 static void
500 tcomplain (void)
501 {
502 error (_("You can't do that when your target is `%s'"),
503 current_target.to_shortname);
504 }
505
506 void
507 noprocess (void)
508 {
509 error (_("You can't do that without a process to debug."));
510 }
511
512 static void
513 default_terminal_info (const char *args, int from_tty)
514 {
515 printf_unfiltered (_("No saved terminal information.\n"));
516 }
517
518 /* A default implementation for the to_get_ada_task_ptid target method.
519
520 This function builds the PTID by using both LWP and TID as part of
521 the PTID lwp and tid elements. The pid used is the pid of the
522 inferior_ptid. */
523
524 static ptid_t
525 default_get_ada_task_ptid (long lwp, long tid)
526 {
527 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
528 }
529
530 static enum exec_direction_kind
531 default_execution_direction (void)
532 {
533 if (!target_can_execute_reverse)
534 return EXEC_FORWARD;
535 else if (!target_can_async_p ())
536 return EXEC_FORWARD;
537 else
538 gdb_assert_not_reached ("\
539 to_execution_direction must be implemented for reverse async");
540 }
541
542 /* Go through the target stack from top to bottom, copying over zero
543 entries in current_target, then filling in still empty entries. In
544 effect, we are doing class inheritance through the pushed target
545 vectors.
546
547 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
548 is currently implemented, is that it discards any knowledge of
549 which target an inherited method originally belonged to.
550 Consequently, new target methods should instead explicitly and
551 locally search the target stack for the target that can handle the
552 request. */
553
554 static void
555 update_current_target (void)
556 {
557 struct target_ops *t;
558
559 /* First, reset current's contents. */
560 memset (&current_target, 0, sizeof (current_target));
561
562 #define INHERIT(FIELD, TARGET) \
563 if (!current_target.FIELD) \
564 current_target.FIELD = (TARGET)->FIELD
565
566 for (t = target_stack; t; t = t->beneath)
567 {
568 INHERIT (to_shortname, t);
569 INHERIT (to_longname, t);
570 INHERIT (to_doc, t);
571 /* Do not inherit to_open. */
572 /* Do not inherit to_close. */
573 /* Do not inherit to_attach. */
574 INHERIT (to_post_attach, t);
575 INHERIT (to_attach_no_wait, t);
576 /* Do not inherit to_detach. */
577 /* Do not inherit to_disconnect. */
578 /* Do not inherit to_resume. */
579 /* Do not inherit to_wait. */
580 /* Do not inherit to_fetch_registers. */
581 /* Do not inherit to_store_registers. */
582 INHERIT (to_prepare_to_store, t);
583 INHERIT (deprecated_xfer_memory, t);
584 INHERIT (to_files_info, t);
585 INHERIT (to_insert_breakpoint, t);
586 INHERIT (to_remove_breakpoint, t);
587 INHERIT (to_can_use_hw_breakpoint, t);
588 INHERIT (to_insert_hw_breakpoint, t);
589 INHERIT (to_remove_hw_breakpoint, t);
590 /* Do not inherit to_ranged_break_num_registers. */
591 INHERIT (to_insert_watchpoint, t);
592 INHERIT (to_remove_watchpoint, t);
593 /* Do not inherit to_insert_mask_watchpoint. */
594 /* Do not inherit to_remove_mask_watchpoint. */
595 INHERIT (to_stopped_data_address, t);
596 INHERIT (to_have_steppable_watchpoint, t);
597 INHERIT (to_have_continuable_watchpoint, t);
598 INHERIT (to_stopped_by_watchpoint, t);
599 INHERIT (to_watchpoint_addr_within_range, t);
600 INHERIT (to_region_ok_for_hw_watchpoint, t);
601 INHERIT (to_can_accel_watchpoint_condition, t);
602 /* Do not inherit to_masked_watch_num_registers. */
603 INHERIT (to_terminal_init, t);
604 INHERIT (to_terminal_inferior, t);
605 INHERIT (to_terminal_ours_for_output, t);
606 INHERIT (to_terminal_ours, t);
607 INHERIT (to_terminal_save_ours, t);
608 INHERIT (to_terminal_info, t);
609 /* Do not inherit to_kill. */
610 INHERIT (to_load, t);
611 /* Do not inherit to_create_inferior. */
612 INHERIT (to_post_startup_inferior, t);
613 INHERIT (to_insert_fork_catchpoint, t);
614 INHERIT (to_remove_fork_catchpoint, t);
615 INHERIT (to_insert_vfork_catchpoint, t);
616 INHERIT (to_remove_vfork_catchpoint, t);
617 /* Do not inherit to_follow_fork. */
618 INHERIT (to_insert_exec_catchpoint, t);
619 INHERIT (to_remove_exec_catchpoint, t);
620 INHERIT (to_set_syscall_catchpoint, t);
621 INHERIT (to_has_exited, t);
622 /* Do not inherit to_mourn_inferior. */
623 INHERIT (to_can_run, t);
624 /* Do not inherit to_pass_signals. */
625 /* Do not inherit to_program_signals. */
626 /* Do not inherit to_thread_alive. */
627 /* Do not inherit to_find_new_threads. */
628 /* Do not inherit to_pid_to_str. */
629 INHERIT (to_extra_thread_info, t);
630 INHERIT (to_thread_name, t);
631 INHERIT (to_stop, t);
632 /* Do not inherit to_xfer_partial. */
633 INHERIT (to_rcmd, t);
634 INHERIT (to_pid_to_exec_file, t);
635 INHERIT (to_log_command, t);
636 INHERIT (to_stratum, t);
637 /* Do not inherit to_has_all_memory. */
638 /* Do not inherit to_has_memory. */
639 /* Do not inherit to_has_stack. */
640 /* Do not inherit to_has_registers. */
641 /* Do not inherit to_has_execution. */
642 INHERIT (to_has_thread_control, t);
643 INHERIT (to_can_async_p, t);
644 INHERIT (to_is_async_p, t);
645 INHERIT (to_async, t);
646 INHERIT (to_find_memory_regions, t);
647 INHERIT (to_make_corefile_notes, t);
648 INHERIT (to_get_bookmark, t);
649 INHERIT (to_goto_bookmark, t);
650 /* Do not inherit to_get_thread_local_address. */
651 INHERIT (to_can_execute_reverse, t);
652 INHERIT (to_execution_direction, t);
653 INHERIT (to_thread_architecture, t);
654 /* Do not inherit to_read_description. */
655 INHERIT (to_get_ada_task_ptid, t);
656 /* Do not inherit to_search_memory. */
657 INHERIT (to_supports_multi_process, t);
658 INHERIT (to_supports_enable_disable_tracepoint, t);
659 INHERIT (to_supports_string_tracing, t);
660 INHERIT (to_trace_init, t);
661 INHERIT (to_download_tracepoint, t);
662 INHERIT (to_can_download_tracepoint, t);
663 INHERIT (to_download_trace_state_variable, t);
664 INHERIT (to_enable_tracepoint, t);
665 INHERIT (to_disable_tracepoint, t);
666 INHERIT (to_trace_set_readonly_regions, t);
667 INHERIT (to_trace_start, t);
668 INHERIT (to_get_trace_status, t);
669 INHERIT (to_get_tracepoint_status, t);
670 INHERIT (to_trace_stop, t);
671 INHERIT (to_trace_find, t);
672 INHERIT (to_get_trace_state_variable_value, t);
673 INHERIT (to_save_trace_data, t);
674 INHERIT (to_upload_tracepoints, t);
675 INHERIT (to_upload_trace_state_variables, t);
676 INHERIT (to_get_raw_trace_data, t);
677 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
678 INHERIT (to_set_disconnected_tracing, t);
679 INHERIT (to_set_circular_trace_buffer, t);
680 INHERIT (to_set_trace_buffer_size, t);
681 INHERIT (to_set_trace_notes, t);
682 INHERIT (to_get_tib_address, t);
683 INHERIT (to_set_permissions, t);
684 INHERIT (to_static_tracepoint_marker_at, t);
685 INHERIT (to_static_tracepoint_markers_by_strid, t);
686 INHERIT (to_traceframe_info, t);
687 INHERIT (to_use_agent, t);
688 INHERIT (to_can_use_agent, t);
689 INHERIT (to_augmented_libraries_svr4_read, t);
690 INHERIT (to_magic, t);
691 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
692 INHERIT (to_can_run_breakpoint_commands, t);
693 /* Do not inherit to_memory_map. */
694 /* Do not inherit to_flash_erase. */
695 /* Do not inherit to_flash_done. */
696 }
697 #undef INHERIT
698
699 /* Clean up a target struct so it no longer has any zero pointers in
700 it. Some entries are defaulted to a method that prints an error,
701 others are hard-wired to a standard recursive default. */
702
703 #define de_fault(field, value) \
704 if (!current_target.field) \
705 current_target.field = value
706
707 de_fault (to_open,
708 (void (*) (char *, int))
709 tcomplain);
710 de_fault (to_close,
711 (void (*) (void))
712 target_ignore);
713 de_fault (to_post_attach,
714 (void (*) (int))
715 target_ignore);
716 de_fault (to_prepare_to_store,
717 (void (*) (struct regcache *))
718 noprocess);
719 de_fault (deprecated_xfer_memory,
720 (int (*) (CORE_ADDR, gdb_byte *, int, int,
721 struct mem_attrib *, struct target_ops *))
722 nomemory);
723 de_fault (to_files_info,
724 (void (*) (struct target_ops *))
725 target_ignore);
726 de_fault (to_insert_breakpoint,
727 memory_insert_breakpoint);
728 de_fault (to_remove_breakpoint,
729 memory_remove_breakpoint);
730 de_fault (to_can_use_hw_breakpoint,
731 (int (*) (int, int, int))
732 return_zero);
733 de_fault (to_insert_hw_breakpoint,
734 (int (*) (struct gdbarch *, struct bp_target_info *))
735 return_minus_one);
736 de_fault (to_remove_hw_breakpoint,
737 (int (*) (struct gdbarch *, struct bp_target_info *))
738 return_minus_one);
739 de_fault (to_insert_watchpoint,
740 (int (*) (CORE_ADDR, int, int, struct expression *))
741 return_minus_one);
742 de_fault (to_remove_watchpoint,
743 (int (*) (CORE_ADDR, int, int, struct expression *))
744 return_minus_one);
745 de_fault (to_stopped_by_watchpoint,
746 (int (*) (void))
747 return_zero);
748 de_fault (to_stopped_data_address,
749 (int (*) (struct target_ops *, CORE_ADDR *))
750 return_zero);
751 de_fault (to_watchpoint_addr_within_range,
752 default_watchpoint_addr_within_range);
753 de_fault (to_region_ok_for_hw_watchpoint,
754 default_region_ok_for_hw_watchpoint);
755 de_fault (to_can_accel_watchpoint_condition,
756 (int (*) (CORE_ADDR, int, int, struct expression *))
757 return_zero);
758 de_fault (to_terminal_init,
759 (void (*) (void))
760 target_ignore);
761 de_fault (to_terminal_inferior,
762 (void (*) (void))
763 target_ignore);
764 de_fault (to_terminal_ours_for_output,
765 (void (*) (void))
766 target_ignore);
767 de_fault (to_terminal_ours,
768 (void (*) (void))
769 target_ignore);
770 de_fault (to_terminal_save_ours,
771 (void (*) (void))
772 target_ignore);
773 de_fault (to_terminal_info,
774 default_terminal_info);
775 de_fault (to_load,
776 (void (*) (char *, int))
777 tcomplain);
778 de_fault (to_post_startup_inferior,
779 (void (*) (ptid_t))
780 target_ignore);
781 de_fault (to_insert_fork_catchpoint,
782 (int (*) (int))
783 return_one);
784 de_fault (to_remove_fork_catchpoint,
785 (int (*) (int))
786 return_one);
787 de_fault (to_insert_vfork_catchpoint,
788 (int (*) (int))
789 return_one);
790 de_fault (to_remove_vfork_catchpoint,
791 (int (*) (int))
792 return_one);
793 de_fault (to_insert_exec_catchpoint,
794 (int (*) (int))
795 return_one);
796 de_fault (to_remove_exec_catchpoint,
797 (int (*) (int))
798 return_one);
799 de_fault (to_set_syscall_catchpoint,
800 (int (*) (int, int, int, int, int *))
801 return_one);
802 de_fault (to_has_exited,
803 (int (*) (int, int, int *))
804 return_zero);
805 de_fault (to_can_run,
806 return_zero);
807 de_fault (to_extra_thread_info,
808 (char *(*) (struct thread_info *))
809 return_zero);
810 de_fault (to_thread_name,
811 (char *(*) (struct thread_info *))
812 return_zero);
813 de_fault (to_stop,
814 (void (*) (ptid_t))
815 target_ignore);
816 current_target.to_xfer_partial = current_xfer_partial;
817 de_fault (to_rcmd,
818 (void (*) (char *, struct ui_file *))
819 tcomplain);
820 de_fault (to_pid_to_exec_file,
821 (char *(*) (int))
822 return_zero);
823 de_fault (to_async,
824 (void (*) (void (*) (enum inferior_event_type, void*), void*))
825 tcomplain);
826 de_fault (to_thread_architecture,
827 default_thread_architecture);
828 current_target.to_read_description = NULL;
829 de_fault (to_get_ada_task_ptid,
830 (ptid_t (*) (long, long))
831 default_get_ada_task_ptid);
832 de_fault (to_supports_multi_process,
833 (int (*) (void))
834 return_zero);
835 de_fault (to_supports_enable_disable_tracepoint,
836 (int (*) (void))
837 return_zero);
838 de_fault (to_supports_string_tracing,
839 (int (*) (void))
840 return_zero);
841 de_fault (to_trace_init,
842 (void (*) (void))
843 tcomplain);
844 de_fault (to_download_tracepoint,
845 (void (*) (struct bp_location *))
846 tcomplain);
847 de_fault (to_can_download_tracepoint,
848 (int (*) (void))
849 return_zero);
850 de_fault (to_download_trace_state_variable,
851 (void (*) (struct trace_state_variable *))
852 tcomplain);
853 de_fault (to_enable_tracepoint,
854 (void (*) (struct bp_location *))
855 tcomplain);
856 de_fault (to_disable_tracepoint,
857 (void (*) (struct bp_location *))
858 tcomplain);
859 de_fault (to_trace_set_readonly_regions,
860 (void (*) (void))
861 tcomplain);
862 de_fault (to_trace_start,
863 (void (*) (void))
864 tcomplain);
865 de_fault (to_get_trace_status,
866 (int (*) (struct trace_status *))
867 return_minus_one);
868 de_fault (to_get_tracepoint_status,
869 (void (*) (struct breakpoint *, struct uploaded_tp *))
870 tcomplain);
871 de_fault (to_trace_stop,
872 (void (*) (void))
873 tcomplain);
874 de_fault (to_trace_find,
875 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
876 return_minus_one);
877 de_fault (to_get_trace_state_variable_value,
878 (int (*) (int, LONGEST *))
879 return_zero);
880 de_fault (to_save_trace_data,
881 (int (*) (const char *))
882 tcomplain);
883 de_fault (to_upload_tracepoints,
884 (int (*) (struct uploaded_tp **))
885 return_zero);
886 de_fault (to_upload_trace_state_variables,
887 (int (*) (struct uploaded_tsv **))
888 return_zero);
889 de_fault (to_get_raw_trace_data,
890 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
891 tcomplain);
892 de_fault (to_get_min_fast_tracepoint_insn_len,
893 (int (*) (void))
894 return_minus_one);
895 de_fault (to_set_disconnected_tracing,
896 (void (*) (int))
897 target_ignore);
898 de_fault (to_set_circular_trace_buffer,
899 (void (*) (int))
900 target_ignore);
901 de_fault (to_set_trace_buffer_size,
902 (void (*) (LONGEST))
903 target_ignore);
904 de_fault (to_set_trace_notes,
905 (int (*) (const char *, const char *, const char *))
906 return_zero);
907 de_fault (to_get_tib_address,
908 (int (*) (ptid_t, CORE_ADDR *))
909 tcomplain);
910 de_fault (to_set_permissions,
911 (void (*) (void))
912 target_ignore);
913 de_fault (to_static_tracepoint_marker_at,
914 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
915 return_zero);
916 de_fault (to_static_tracepoint_markers_by_strid,
917 (VEC(static_tracepoint_marker_p) * (*) (const char *))
918 tcomplain);
919 de_fault (to_traceframe_info,
920 (struct traceframe_info * (*) (void))
921 return_zero);
922 de_fault (to_supports_evaluation_of_breakpoint_conditions,
923 (int (*) (void))
924 return_zero);
925 de_fault (to_can_run_breakpoint_commands,
926 (int (*) (void))
927 return_zero);
928 de_fault (to_use_agent,
929 (int (*) (int))
930 tcomplain);
931 de_fault (to_can_use_agent,
932 (int (*) (void))
933 return_zero);
934 de_fault (to_augmented_libraries_svr4_read,
935 (int (*) (void))
936 return_zero);
937 de_fault (to_execution_direction, default_execution_direction);
938
939 #undef de_fault
940
941 /* Finally, position the target-stack beneath the squashed
942 "current_target". That way code looking for a non-inherited
943 target method can quickly and simply find it. */
944 current_target.beneath = target_stack;
945
946 if (targetdebug)
947 setup_target_debug ();
948 }
949
950 /* Push a new target type into the stack of the existing target accessors,
951 possibly superseding some of the existing accessors.
952
953 Rather than allow an empty stack, we always have the dummy target at
954 the bottom stratum, so we can call the function vectors without
955 checking them. */
956
957 void
958 push_target (struct target_ops *t)
959 {
960 struct target_ops **cur;
961
962 /* Check magic number. If wrong, it probably means someone changed
963 the struct definition, but not all the places that initialize one. */
964 if (t->to_magic != OPS_MAGIC)
965 {
966 fprintf_unfiltered (gdb_stderr,
967 "Magic number of %s target struct wrong\n",
968 t->to_shortname);
969 internal_error (__FILE__, __LINE__,
970 _("failed internal consistency check"));
971 }
972
973 /* Find the proper stratum to install this target in. */
974 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
975 {
976 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
977 break;
978 }
979
980 /* If there are already targets at this stratum, remove them. */
981 /* FIXME: cagney/2003-10-15: I think this should be popping all
982 targets to CUR, and not just those at this stratum level. */
983 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
984 {
985 /* There's already something at this stratum level. Close it,
986 and un-hook it from the stack. */
987 struct target_ops *tmp = (*cur);
988
989 (*cur) = (*cur)->beneath;
990 tmp->beneath = NULL;
991 target_close (tmp);
992 }
993
994 /* We have removed all targets in our stratum, now add the new one. */
995 t->beneath = (*cur);
996 (*cur) = t;
997
998 update_current_target ();
999 }
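
/* Illustrative sketch only: a target's to_open callback typically ends
   by pushing its vector, which makes it the current target at its
   stratum.  The function name is an assumption; it refers to the
   hypothetical vector sketched earlier.  */

static void
hypothetical_child_open (char *args, int from_tty)
{
  /* ... establish the connection or inferior described by ARGS ...  */

  push_target (&hypothetical_child_ops);
}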
1000
1001 /* Remove a target_ops vector from the stack, wherever it may be.
1002 Return how many times it was removed (0 or 1). */
1003
1004 int
1005 unpush_target (struct target_ops *t)
1006 {
1007 struct target_ops **cur;
1008 struct target_ops *tmp;
1009
1010 if (t->to_stratum == dummy_stratum)
1011 internal_error (__FILE__, __LINE__,
1012 _("Attempt to unpush the dummy target"));
1013
1014 /* Look for the specified target. Note that we assume that a target
1015 can only occur once in the target stack. */
1016
1017 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1018 {
1019 if ((*cur) == t)
1020 break;
1021 }
1022
1023 /* If we don't find target_ops, quit. Only open targets should be
1024 closed. */
1025 if ((*cur) == NULL)
1026 return 0;
1027
1028 /* Unchain the target. */
1029 tmp = (*cur);
1030 (*cur) = (*cur)->beneath;
1031 tmp->beneath = NULL;
1032
1033 update_current_target ();
1034
1035 /* Finally close the target. Note we do this after unchaining, so
1036 any target method calls from within the target_close
1037 implementation don't end up in T anymore. */
1038 target_close (t);
1039
1040 return 1;
1041 }
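
/* Companion sketch (illustrative only): when the connection goes away,
   e.g. from a to_detach or to_mourn_inferior method, the same vector
   is unpushed again.  */

static void
hypothetical_child_forget_process (void)
{
  unpush_target (&hypothetical_child_ops);
}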
1042
1043 void
1044 pop_all_targets_above (enum strata above_stratum)
1045 {
1046 while ((int) (current_target.to_stratum) > (int) above_stratum)
1047 {
1048 if (!unpush_target (target_stack))
1049 {
1050 fprintf_unfiltered (gdb_stderr,
1051 "pop_all_targets couldn't find target %s\n",
1052 target_stack->to_shortname);
1053 internal_error (__FILE__, __LINE__,
1054 _("failed internal consistency check"));
1055 break;
1056 }
1057 }
1058 }
1059
1060 void
1061 pop_all_targets (void)
1062 {
1063 pop_all_targets_above (dummy_stratum);
1064 }
1065
1066 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1067
1068 int
1069 target_is_pushed (struct target_ops *t)
1070 {
1071 struct target_ops **cur;
1072
1073 /* Check magic number. If wrong, it probably means someone changed
1074 the struct definition, but not all the places that initialize one. */
1075 if (t->to_magic != OPS_MAGIC)
1076 {
1077 fprintf_unfiltered (gdb_stderr,
1078 "Magic number of %s target struct wrong\n",
1079 t->to_shortname);
1080 internal_error (__FILE__, __LINE__,
1081 _("failed internal consistency check"));
1082 }
1083
1084 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1085 if (*cur == t)
1086 return 1;
1087
1088 return 0;
1089 }
1090
1091 /* Using the objfile specified in OBJFILE, find the address for the
1092 current thread's thread-local storage with offset OFFSET. */
1093 CORE_ADDR
1094 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1095 {
1096 volatile CORE_ADDR addr = 0;
1097 struct target_ops *target;
1098
1099 for (target = current_target.beneath;
1100 target != NULL;
1101 target = target->beneath)
1102 {
1103 if (target->to_get_thread_local_address != NULL)
1104 break;
1105 }
1106
1107 if (target != NULL
1108 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1109 {
1110 ptid_t ptid = inferior_ptid;
1111 volatile struct gdb_exception ex;
1112
1113 TRY_CATCH (ex, RETURN_MASK_ALL)
1114 {
1115 CORE_ADDR lm_addr;
1116
1117 /* Fetch the load module address for this objfile. */
1118 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1119 objfile);
1120 /* If it's 0, throw the appropriate exception. */
1121 if (lm_addr == 0)
1122 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1123 _("TLS load module not found"));
1124
1125 addr = target->to_get_thread_local_address (target, ptid,
1126 lm_addr, offset);
1127 }
1128 /* If an error occurred, print TLS related messages here. Otherwise,
1129 throw the error to some higher catcher. */
1130 if (ex.reason < 0)
1131 {
1132 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1133
1134 switch (ex.error)
1135 {
1136 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1137 error (_("Cannot find thread-local variables "
1138 "in this thread library."));
1139 break;
1140 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1141 if (objfile_is_library)
1142 error (_("Cannot find shared library `%s' in dynamic"
1143 " linker's load module list"), objfile_name (objfile));
1144 else
1145 error (_("Cannot find executable file `%s' in dynamic"
1146 " linker's load module list"), objfile_name (objfile));
1147 break;
1148 case TLS_NOT_ALLOCATED_YET_ERROR:
1149 if (objfile_is_library)
1150 error (_("The inferior has not yet allocated storage for"
1151 " thread-local variables in\n"
1152 "the shared library `%s'\n"
1153 "for %s"),
1154 objfile_name (objfile), target_pid_to_str (ptid));
1155 else
1156 error (_("The inferior has not yet allocated storage for"
1157 " thread-local variables in\n"
1158 "the executable `%s'\n"
1159 "for %s"),
1160 objfile_name (objfile), target_pid_to_str (ptid));
1161 break;
1162 case TLS_GENERIC_ERROR:
1163 if (objfile_is_library)
1164 error (_("Cannot find thread-local storage for %s, "
1165 "shared library %s:\n%s"),
1166 target_pid_to_str (ptid),
1167 objfile_name (objfile), ex.message);
1168 else
1169 error (_("Cannot find thread-local storage for %s, "
1170 "executable file %s:\n%s"),
1171 target_pid_to_str (ptid),
1172 objfile_name (objfile), ex.message);
1173 break;
1174 default:
1175 throw_exception (ex);
1176 break;
1177 }
1178 }
1179 }
1180 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1181 TLS is an ABI-specific thing. But we don't do that yet. */
1182 else
1183 error (_("Cannot find thread-local variables on this target"));
1184
1185 return addr;
1186 }
1187
1188 const char *
1189 target_xfer_error_to_string (enum target_xfer_error err)
1190 {
1191 #define CASE(X) case X: return #X
1192 switch (err)
1193 {
1194 CASE(TARGET_XFER_E_IO);
1195 CASE(TARGET_XFER_E_UNAVAILABLE);
1196 default:
1197 return "<unknown>";
1198 }
1199 #undef CASE
1200 }
1201
1202
1203 #undef MIN
1204 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1205
1206 /* target_read_string -- read a null terminated string, up to LEN bytes,
1207 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1208 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1209 is responsible for freeing it. Return the number of bytes successfully
1210 read. */
1211
1212 int
1213 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1214 {
1215 int tlen, offset, i;
1216 gdb_byte buf[4];
1217 int errcode = 0;
1218 char *buffer;
1219 int buffer_allocated;
1220 char *bufptr;
1221 unsigned int nbytes_read = 0;
1222
1223 gdb_assert (string);
1224
1225 /* Small for testing. */
1226 buffer_allocated = 4;
1227 buffer = xmalloc (buffer_allocated);
1228 bufptr = buffer;
1229
1230 while (len > 0)
1231 {
1232 tlen = MIN (len, 4 - (memaddr & 3));
1233 offset = memaddr & 3;
1234
1235 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1236 if (errcode != 0)
1237 {
1238 /* The transfer request might have crossed the boundary to an
1239 unallocated region of memory. Retry the transfer, requesting
1240 a single byte. */
1241 tlen = 1;
1242 offset = 0;
1243 errcode = target_read_memory (memaddr, buf, 1);
1244 if (errcode != 0)
1245 goto done;
1246 }
1247
1248 if (bufptr - buffer + tlen > buffer_allocated)
1249 {
1250 unsigned int bytes;
1251
1252 bytes = bufptr - buffer;
1253 buffer_allocated *= 2;
1254 buffer = xrealloc (buffer, buffer_allocated);
1255 bufptr = buffer + bytes;
1256 }
1257
1258 for (i = 0; i < tlen; i++)
1259 {
1260 *bufptr++ = buf[i + offset];
1261 if (buf[i + offset] == '\000')
1262 {
1263 nbytes_read += i + 1;
1264 goto done;
1265 }
1266 }
1267
1268 memaddr += tlen;
1269 len -= tlen;
1270 nbytes_read += tlen;
1271 }
1272 done:
1273 *string = buffer;
1274 if (errnop != NULL)
1275 *errnop = errcode;
1276 return nbytes_read;
1277 }
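
/* Usage sketch (illustrative only, never called from this file): read
   a string of at most 200 bytes from ADDR and print it if the read
   found a terminating NUL.  The function name is an assumption.  */

static void
example_print_target_string (CORE_ADDR addr)
{
  char *str;
  int err;
  int nread = target_read_string (addr, &str, 200, &err);

  if (err == 0 && nread > 0 && str[nread - 1] == '\0')
    printf_filtered ("%s\n", str);
  xfree (str);
}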
1278
1279 struct target_section_table *
1280 target_get_section_table (struct target_ops *target)
1281 {
1282 struct target_ops *t;
1283
1284 if (targetdebug)
1285 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1286
1287 for (t = target; t != NULL; t = t->beneath)
1288 if (t->to_get_section_table != NULL)
1289 return (*t->to_get_section_table) (t);
1290
1291 return NULL;
1292 }
1293
1294 /* Find a section containing ADDR. */
1295
1296 struct target_section *
1297 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1298 {
1299 struct target_section_table *table = target_get_section_table (target);
1300 struct target_section *secp;
1301
1302 if (table == NULL)
1303 return NULL;
1304
1305 for (secp = table->sections; secp < table->sections_end; secp++)
1306 {
1307 if (addr >= secp->addr && addr < secp->endaddr)
1308 return secp;
1309 }
1310 return NULL;
1311 }
1312
1313 /* Read memory from the live target, even if currently inspecting a
1314 traceframe. The return is the same as that of target_read. */
1315
1316 static LONGEST
1317 target_read_live_memory (enum target_object object,
1318 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1319 {
1320 LONGEST ret;
1321 struct cleanup *cleanup;
1322
1323 /* Switch momentarily out of tfind mode so as to access live memory.
1324 Note that this must not clear global state, such as the frame
1325 cache, which must still remain valid for the previous traceframe.
1326 We may be _building_ the frame cache at this point. */
1327 cleanup = make_cleanup_restore_traceframe_number ();
1328 set_traceframe_number (-1);
1329
1330 ret = target_read (current_target.beneath, object, NULL,
1331 myaddr, memaddr, len);
1332
1333 do_cleanups (cleanup);
1334 return ret;
1335 }
1336
1337 /* Using the set of read-only target sections of OPS, read live
1338 read-only memory. Note that the actual reads start from the
1339 top-most target again.
1340
1341 For interface/parameters/return description see target.h,
1342 to_xfer_partial. */
1343
1344 static LONGEST
1345 memory_xfer_live_readonly_partial (struct target_ops *ops,
1346 enum target_object object,
1347 gdb_byte *readbuf, ULONGEST memaddr,
1348 LONGEST len)
1349 {
1350 struct target_section *secp;
1351 struct target_section_table *table;
1352
1353 secp = target_section_by_addr (ops, memaddr);
1354 if (secp != NULL
1355 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1356 secp->the_bfd_section)
1357 & SEC_READONLY))
1358 {
1359 struct target_section *p;
1360 ULONGEST memend = memaddr + len;
1361
1362 table = target_get_section_table (ops);
1363
1364 for (p = table->sections; p < table->sections_end; p++)
1365 {
1366 if (memaddr >= p->addr)
1367 {
1368 if (memend <= p->endaddr)
1369 {
1370 /* Entire transfer is within this section. */
1371 return target_read_live_memory (object, memaddr,
1372 readbuf, len);
1373 }
1374 else if (memaddr >= p->endaddr)
1375 {
1376 /* This section ends before the transfer starts. */
1377 continue;
1378 }
1379 else
1380 {
1381 /* This section overlaps the transfer. Just do half. */
1382 len = p->endaddr - memaddr;
1383 return target_read_live_memory (object, memaddr,
1384 readbuf, len);
1385 }
1386 }
1387 }
1388 }
1389
1390 return 0;
1391 }
1392
1393 /* Read memory from more than one valid target. A core file, for
1394 instance, could have some of the memory but delegate other bits to
1395 the target below it. So, we must manually try all targets. */
1396
1397 static LONGEST
1398 raw_memory_xfer_partial (struct target_ops *ops, void *readbuf,
1399 const void *writebuf, ULONGEST memaddr, LONGEST len)
1400 {
1401 LONGEST res;
1402
1403 do
1404 {
1405 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1406 readbuf, writebuf, memaddr, len);
1407 if (res > 0)
1408 break;
1409
1410 /* We want to continue past core files to executables, but not
1411 past a running target's memory. */
1412 if (ops->to_has_all_memory (ops))
1413 break;
1414
1415 ops = ops->beneath;
1416 }
1417 while (ops != NULL);
1418
1419 return res;
1420 }
1421
1422 /* Perform a partial memory transfer.
1423 For docs see target.h, to_xfer_partial. */
1424
1425 static LONGEST
1426 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1427 void *readbuf, const void *writebuf, ULONGEST memaddr,
1428 LONGEST len)
1429 {
1430 LONGEST res;
1431 int reg_len;
1432 struct mem_region *region;
1433 struct inferior *inf;
1434
1435 /* For accesses to unmapped overlay sections, read directly from
1436 files. Must do this first, as MEMADDR may need adjustment. */
1437 if (readbuf != NULL && overlay_debugging)
1438 {
1439 struct obj_section *section = find_pc_overlay (memaddr);
1440
1441 if (pc_in_unmapped_range (memaddr, section))
1442 {
1443 struct target_section_table *table
1444 = target_get_section_table (ops);
1445 const char *section_name = section->the_bfd_section->name;
1446
1447 memaddr = overlay_mapped_address (memaddr, section);
1448 return section_table_xfer_memory_partial (readbuf, writebuf,
1449 memaddr, len,
1450 table->sections,
1451 table->sections_end,
1452 section_name);
1453 }
1454 }
1455
1456 /* Try the executable files, if "trust-readonly-sections" is set. */
1457 if (readbuf != NULL && trust_readonly)
1458 {
1459 struct target_section *secp;
1460 struct target_section_table *table;
1461
1462 secp = target_section_by_addr (ops, memaddr);
1463 if (secp != NULL
1464 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1465 secp->the_bfd_section)
1466 & SEC_READONLY))
1467 {
1468 table = target_get_section_table (ops);
1469 return section_table_xfer_memory_partial (readbuf, writebuf,
1470 memaddr, len,
1471 table->sections,
1472 table->sections_end,
1473 NULL);
1474 }
1475 }
1476
1477 /* If reading unavailable memory in the context of traceframes, and
1478 this address falls within a read-only section, fall back to
1479 reading from live memory. */
1480 if (readbuf != NULL && get_traceframe_number () != -1)
1481 {
1482 VEC(mem_range_s) *available;
1483
1484 /* If we fail to get the set of available memory, then the
1485 target does not support querying traceframe info, and so we
1486 attempt reading from the traceframe anyway (assuming the
1487 target implements the old QTro packet then). */
1488 if (traceframe_available_memory (&available, memaddr, len))
1489 {
1490 struct cleanup *old_chain;
1491
1492 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1493
1494 if (VEC_empty (mem_range_s, available)
1495 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1496 {
1497 /* Don't read into the traceframe's available
1498 memory. */
1499 if (!VEC_empty (mem_range_s, available))
1500 {
1501 LONGEST oldlen = len;
1502
1503 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1504 gdb_assert (len <= oldlen);
1505 }
1506
1507 do_cleanups (old_chain);
1508
1509 /* This goes through the topmost target again. */
1510 res = memory_xfer_live_readonly_partial (ops, object,
1511 readbuf, memaddr, len);
1512 if (res > 0)
1513 return res;
1514
1515 /* No use trying further, we know some memory starting
1516 at MEMADDR isn't available. */
1517 return TARGET_XFER_E_UNAVAILABLE;
1518 }
1519
1520 /* Don't try to read more than how much is available, in
1521 case the target implements the deprecated QTro packet to
1522 cater for older GDBs (the target's knowledge of read-only
1523 sections may be outdated by now). */
1524 len = VEC_index (mem_range_s, available, 0)->length;
1525
1526 do_cleanups (old_chain);
1527 }
1528 }
1529
1530 /* Try GDB's internal data cache. */
1531 region = lookup_mem_region (memaddr);
1532 /* region->hi == 0 means there's no upper bound. */
1533 if (memaddr + len < region->hi || region->hi == 0)
1534 reg_len = len;
1535 else
1536 reg_len = region->hi - memaddr;
1537
1538 switch (region->attrib.mode)
1539 {
1540 case MEM_RO:
1541 if (writebuf != NULL)
1542 return -1;
1543 break;
1544
1545 case MEM_WO:
1546 if (readbuf != NULL)
1547 return -1;
1548 break;
1549
1550 case MEM_FLASH:
1551 /* We only support writing to flash during "load" for now. */
1552 if (writebuf != NULL)
1553 error (_("Writing to flash memory forbidden in this context"));
1554 break;
1555
1556 case MEM_NONE:
1557 return -1;
1558 }
1559
1560 if (!ptid_equal (inferior_ptid, null_ptid))
1561 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1562 else
1563 inf = NULL;
1564
1565 if (inf != NULL
1566 /* The dcache reads whole cache lines; that doesn't play well
1567 with reading from a trace buffer, because reading outside of
1568 the collected memory range fails. */
1569 && get_traceframe_number () == -1
1570 && (region->attrib.cache
1571 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1572 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1573 {
1574 DCACHE *dcache = target_dcache_get_or_init ();
1575
1576 if (readbuf != NULL)
1577 res = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1578 else
1579 /* FIXME drow/2006-08-09: If we're going to preserve const
1580 correctness dcache_xfer_memory should take readbuf and
1581 writebuf. */
1582 res = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1583 reg_len, 1);
1584 if (res <= 0)
1585 return -1;
1586 else
1587 return res;
1588 }
1589
1590 /* If none of those methods found the memory we wanted, fall back
1591 to a target partial transfer. Normally a single call to
1592 to_xfer_partial is enough; if it doesn't recognize an object
1593 it will call the to_xfer_partial of the next target down.
1594 But for memory this won't do. Memory is the only target
1595 object which can be read from more than one valid target. */
1596 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len);
1597
1598 /* Make sure the cache gets updated no matter what: even if this write
1599 is not tagged as a stack (or code) write, it may still touch memory
1600 that the stack or code cache holds, so the cache must be updated. */
1601
1602 if (res > 0
1603 && inf != NULL
1604 && writebuf != NULL
1605 && target_dcache_init_p ()
1606 && !region->attrib.cache
1607 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1608 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1609 {
1610 DCACHE *dcache = target_dcache_get ();
1611
1612 dcache_update (dcache, memaddr, (void *) writebuf, res);
1613 }
1614
1615 /* If we still haven't got anything, return the last error. We
1616 give up. */
1617 return res;
1618 }
1619
1620 /* Perform a partial memory transfer. For docs see target.h,
1621 to_xfer_partial. */
1622
1623 static LONGEST
1624 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1625 void *readbuf, const void *writebuf, ULONGEST memaddr,
1626 LONGEST len)
1627 {
1628 int res;
1629
1630 /* Zero length requests are ok and require no work. */
1631 if (len == 0)
1632 return 0;
1633
1634 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1635 breakpoint insns, thus hiding out from higher layers whether
1636 there are software breakpoints inserted in the code stream. */
1637 if (readbuf != NULL)
1638 {
1639 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1640
1641 if (res > 0 && !show_memory_breakpoints)
1642 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1643 }
1644 else
1645 {
1646 void *buf;
1647 struct cleanup *old_chain;
1648
1649 /* A large write request is likely to be partially satisfied
1650 by memory_xfer_partial_1. We will continually malloc
1651 and free a copy of the entire write request for breakpoint
1652 shadow handling even though we only end up writing a small
1653 subset of it. Cap writes to 4KB to mitigate this. */
1654 len = min (4096, len);
1655
1656 buf = xmalloc (len);
1657 old_chain = make_cleanup (xfree, buf);
1658 memcpy (buf, writebuf, len);
1659
1660 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1661 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1662
1663 do_cleanups (old_chain);
1664 }
1665
1666 return res;
1667 }
1668
1669 static void
1670 restore_show_memory_breakpoints (void *arg)
1671 {
1672 show_memory_breakpoints = (uintptr_t) arg;
1673 }
1674
1675 struct cleanup *
1676 make_show_memory_breakpoints_cleanup (int show)
1677 {
1678 int current = show_memory_breakpoints;
1679
1680 show_memory_breakpoints = show;
1681 return make_cleanup (restore_show_memory_breakpoints,
1682 (void *) (uintptr_t) current);
1683 }
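
/* Usage sketch (illustrative only): temporarily disable breakpoint
   shadow handling so a read returns what is really in target memory,
   including any breakpoint instructions GDB has inserted.  The wrapper
   name is an assumption.  */

static int
example_read_memory_unshadowed (CORE_ADDR memaddr, gdb_byte *myaddr,
				ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int ret = target_read_memory (memaddr, myaddr, len);

  do_cleanups (old_chain);
  return ret;
}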
1684
1685 /* For docs see target.h, to_xfer_partial. */
1686
1687 LONGEST
1688 target_xfer_partial (struct target_ops *ops,
1689 enum target_object object, const char *annex,
1690 gdb_byte *readbuf, const gdb_byte *writebuf,
1691 ULONGEST offset, LONGEST len)
1692 {
1693 LONGEST retval;
1694
1695 gdb_assert (ops->to_xfer_partial != NULL);
1696
1697 if (writebuf && !may_write_memory)
1698 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1699 core_addr_to_string_nz (offset), plongest (len));
1700
1701 /* If this is a memory transfer, let the memory-specific code
1702 have a look at it instead. Memory transfers are more
1703 complicated. */
1704 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1705 || object == TARGET_OBJECT_CODE_MEMORY)
1706 retval = memory_xfer_partial (ops, object, readbuf,
1707 writebuf, offset, len);
1708 else if (object == TARGET_OBJECT_RAW_MEMORY)
1709 {
1710 /* Request the normal memory object from other layers. */
1711 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1712 }
1713 else
1714 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1715 writebuf, offset, len);
1716
1717 if (targetdebug)
1718 {
1719 const unsigned char *myaddr = NULL;
1720
1721 fprintf_unfiltered (gdb_stdlog,
1722 "%s:target_xfer_partial "
1723 "(%d, %s, %s, %s, %s, %s) = %s",
1724 ops->to_shortname,
1725 (int) object,
1726 (annex ? annex : "(null)"),
1727 host_address_to_string (readbuf),
1728 host_address_to_string (writebuf),
1729 core_addr_to_string_nz (offset),
1730 plongest (len), plongest (retval));
1731
1732 if (readbuf)
1733 myaddr = readbuf;
1734 if (writebuf)
1735 myaddr = writebuf;
1736 if (retval > 0 && myaddr != NULL)
1737 {
1738 int i;
1739
1740 fputs_unfiltered (", bytes =", gdb_stdlog);
1741 for (i = 0; i < retval; i++)
1742 {
1743 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1744 {
1745 if (targetdebug < 2 && i > 0)
1746 {
1747 fprintf_unfiltered (gdb_stdlog, " ...");
1748 break;
1749 }
1750 fprintf_unfiltered (gdb_stdlog, "\n");
1751 }
1752
1753 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1754 }
1755 }
1756
1757 fputc_unfiltered ('\n', gdb_stdlog);
1758 }
1759 return retval;
1760 }
1761
1762 /* Read LEN bytes of target memory at address MEMADDR, placing the
1763 results in GDB's memory at MYADDR. Returns either 0 for success or
1764 a target_xfer_error value if any error occurs.
1765
1766 If an error occurs, no guarantee is made about the contents of the data at
1767 MYADDR. In particular, the caller should not depend upon partial reads
1768 filling the buffer with good data. There is no way for the caller to know
1769 how much good data might have been transfered anyway. Callers that can
1770 deal with partial reads should call target_read (which will retry until
1771 it makes no progress, and then return how much was transferred). */
1772
1773 int
1774 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1775 {
1776 /* Dispatch to the topmost target, not the flattened current_target.
1777 Memory accesses check target->to_has_(all_)memory, and the
1778 flattened target doesn't inherit those. */
1779 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1780 myaddr, memaddr, len) == len)
1781 return 0;
1782 else
1783 return TARGET_XFER_E_IO;
1784 }
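
/* Usage sketch (illustrative only): a caller that is happy with an
   all-or-nothing read checks the return value and decodes the bytes on
   success.  The helper name is an assumption.  */

static ULONGEST
example_read_unsigned_word (struct gdbarch *gdbarch, CORE_ADDR addr, int len)
{
  gdb_byte buf[8];

  gdb_assert (len > 0 && len <= (int) sizeof (buf));
  if (target_read_memory (addr, buf, len) != 0)
    return 0;
  return extract_unsigned_integer (buf, len, gdbarch_byte_order (gdbarch));
}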
1785
1786 /* Like target_read_memory, but specify explicitly that this is a read
1787 from the target's raw memory. That is, this read bypasses the
1788 dcache, breakpoint shadowing, etc. */
1789
1790 int
1791 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1792 {
1793 /* See comment in target_read_memory about why the request starts at
1794 current_target.beneath. */
1795 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1796 myaddr, memaddr, len) == len)
1797 return 0;
1798 else
1799 return TARGET_XFER_E_IO;
1800 }
1801
1802 /* Like target_read_memory, but specify explicitly that this is a read from
1803 the target's stack. This may trigger different cache behavior. */
1804
1805 int
1806 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1807 {
1808 /* See comment in target_read_memory about why the request starts at
1809 current_target.beneath. */
1810 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1811 myaddr, memaddr, len) == len)
1812 return 0;
1813 else
1814 return TARGET_XFER_E_IO;
1815 }
1816
1817 /* Like target_read_memory, but specify explicitly that this is a read from
1818 the target's code. This may trigger different cache behavior. */
1819
1820 int
1821 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1822 {
1823 /* See comment in target_read_memory about why the request starts at
1824 current_target.beneath. */
1825 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1826 myaddr, memaddr, len) == len)
1827 return 0;
1828 else
1829 return TARGET_XFER_E_IO;
1830 }
1831
1832 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1833 Returns either 0 for success or a target_xfer_error value if any
1834 error occurs. If an error occurs, no guarantee is made about how
1835 much data got written. Callers that can deal with partial writes
1836 should call target_write. */
1837
1838 int
1839 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1840 {
1841 /* See comment in target_read_memory about why the request starts at
1842 current_target.beneath. */
1843 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1844 myaddr, memaddr, len) == len)
1845 return 0;
1846 else
1847 return TARGET_XFER_E_IO;
1848 }
1849
1850 /* Write LEN bytes from MYADDR to target raw memory at address
1851 MEMADDR. Returns either 0 for success or a target_xfer_error value
1852 if any error occurs. If an error occurs, no guarantee is made
1853 about how much data got written. Callers that can deal with
1854 partial writes should call target_write. */
1855
1856 int
1857 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1858 {
1859 /* See comment in target_read_memory about why the request starts at
1860 current_target.beneath. */
1861 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1862 myaddr, memaddr, len) == len)
1863 return 0;
1864 else
1865 return TARGET_XFER_E_IO;
1866 }
1867
1868 /* Fetch the target's memory map. */
1869
1870 VEC(mem_region_s) *
1871 target_memory_map (void)
1872 {
1873 VEC(mem_region_s) *result;
1874 struct mem_region *last_one, *this_one;
1875 int ix;
1876 struct target_ops *t;
1877
1878 if (targetdebug)
1879 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1880
1881 for (t = current_target.beneath; t != NULL; t = t->beneath)
1882 if (t->to_memory_map != NULL)
1883 break;
1884
1885 if (t == NULL)
1886 return NULL;
1887
1888 result = t->to_memory_map (t);
1889 if (result == NULL)
1890 return NULL;
1891
1892 qsort (VEC_address (mem_region_s, result),
1893 VEC_length (mem_region_s, result),
1894 sizeof (struct mem_region), mem_region_cmp);
1895
1896 /* Check that regions do not overlap. Simultaneously assign
1897 a numbering for the "mem" commands to use to refer to
1898 each region. */
1899 last_one = NULL;
1900 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1901 {
1902 this_one->number = ix;
1903
1904 if (last_one && last_one->hi > this_one->lo)
1905 {
1906 warning (_("Overlapping regions in memory map: ignoring"));
1907 VEC_free (mem_region_s, result);
1908 return NULL;
1909 }
1910 last_one = this_one;
1911 }
1912
1913 return result;
1914 }
1915
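/* Illustrative sketch (not part of the original file): iterating over
   the vector returned by target_memory_map above.  Region numbers are
   assigned by target_memory_map itself; the printing here is purely
   hypothetical.  Guarded out so it does not affect the build.  */
#if 0
static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    fprintf_unfiltered (gdb_stdlog, "region %d: %s..%s\n",
			r->number, hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}
#endif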
1916 void
1917 target_flash_erase (ULONGEST address, LONGEST length)
1918 {
1919 struct target_ops *t;
1920
1921 for (t = current_target.beneath; t != NULL; t = t->beneath)
1922 if (t->to_flash_erase != NULL)
1923 {
1924 if (targetdebug)
1925 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1926 hex_string (address), phex (length, 0));
1927 t->to_flash_erase (t, address, length);
1928 return;
1929 }
1930
1931 tcomplain ();
1932 }
1933
1934 void
1935 target_flash_done (void)
1936 {
1937 struct target_ops *t;
1938
1939 for (t = current_target.beneath; t != NULL; t = t->beneath)
1940 if (t->to_flash_done != NULL)
1941 {
1942 if (targetdebug)
1943 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1944 t->to_flash_done (t);
1945 return;
1946 }
1947
1948 tcomplain ();
1949 }
1950
1951 static void
1952 show_trust_readonly (struct ui_file *file, int from_tty,
1953 struct cmd_list_element *c, const char *value)
1954 {
1955 fprintf_filtered (file,
1956 _("Mode for reading from readonly sections is %s.\n"),
1957 value);
1958 }
1959
1960 /* More generic transfers. */
1961
1962 static LONGEST
1963 default_xfer_partial (struct target_ops *ops, enum target_object object,
1964 const char *annex, gdb_byte *readbuf,
1965 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1966 {
1967 if (object == TARGET_OBJECT_MEMORY
1968 && ops->deprecated_xfer_memory != NULL)
1969 /* If available, fall back to the target's
1970 "deprecated_xfer_memory" method. */
1971 {
1972 int xfered = -1;
1973
1974 errno = 0;
1975 if (writebuf != NULL)
1976 {
1977 void *buffer = xmalloc (len);
1978 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1979
1980 memcpy (buffer, writebuf, len);
1981 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1982 1/*write*/, NULL, ops);
1983 do_cleanups (cleanup);
1984 }
1985 if (readbuf != NULL)
1986 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1987 0/*read*/, NULL, ops);
1988 if (xfered > 0)
1989 return xfered;
1990 else if (xfered == 0 && errno == 0)
1991 /* "deprecated_xfer_memory" uses 0, cross checked against
1992 ERRNO as one indication of an error. */
1993 return 0;
1994 else
1995 return -1;
1996 }
1997 else if (ops->beneath != NULL)
1998 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1999 readbuf, writebuf, offset, len);
2000 else
2001 return -1;
2002 }
2003
2004 /* The xfer_partial handler for the topmost target. Unlike the default,
2005 it does not need to handle memory specially; it just passes all
2006 requests down the stack. */
2007
2008 static LONGEST
2009 current_xfer_partial (struct target_ops *ops, enum target_object object,
2010 const char *annex, gdb_byte *readbuf,
2011 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2012 {
2013 if (ops->beneath != NULL)
2014 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2015 readbuf, writebuf, offset, len);
2016 else
2017 return -1;
2018 }
2019
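/* Illustrative sketch (not part of the original file): the shape of a
   to_xfer_partial implementation for a hypothetical target that
   serves TARGET_OBJECT_MEMORY from a small local buffer and delegates
   every other request to the target beneath, following the same
   delegation pattern as current_xfer_partial above.  All names are
   made up for the example.  Guarded out so it does not affect the
   build.  */
#if 0
static gdb_byte example_buffer[256];

static LONGEST
example_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY && offset < sizeof (example_buffer))
    {
      /* Clamp the request to what the local buffer can provide.  */
      if (len > (LONGEST) (sizeof (example_buffer) - offset))
	len = sizeof (example_buffer) - offset;

      if (readbuf != NULL)
	memcpy (readbuf, example_buffer + offset, len);
      if (writebuf != NULL)
	memcpy (example_buffer + offset, writebuf, len);
      return len;
    }

  /* Not ours; let the next target on the stack handle it.  */
  if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  return -1;
}
#endif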
2020 /* Target vector read/write partial wrapper functions. */
2021
2022 static LONGEST
2023 target_read_partial (struct target_ops *ops,
2024 enum target_object object,
2025 const char *annex, gdb_byte *buf,
2026 ULONGEST offset, LONGEST len)
2027 {
2028 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2029 }
2030
2031 static LONGEST
2032 target_write_partial (struct target_ops *ops,
2033 enum target_object object,
2034 const char *annex, const gdb_byte *buf,
2035 ULONGEST offset, LONGEST len)
2036 {
2037 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2038 }
2039
2040 /* Wrappers to perform the full transfer. */
2041
2042 /* For docs on target_read see target.h. */
2043
2044 LONGEST
2045 target_read (struct target_ops *ops,
2046 enum target_object object,
2047 const char *annex, gdb_byte *buf,
2048 ULONGEST offset, LONGEST len)
2049 {
2050 LONGEST xfered = 0;
2051
2052 while (xfered < len)
2053 {
2054 LONGEST xfer = target_read_partial (ops, object, annex,
2055 (gdb_byte *) buf + xfered,
2056 offset + xfered, len - xfered);
2057
2058 /* Call an observer, notifying them of the xfer progress? */
2059 if (xfer == 0)
2060 return xfered;
2061 if (xfer < 0)
2062 return -1;
2063 xfered += xfer;
2064 QUIT;
2065 }
2066 return len;
2067 }
2068
2069 /* Assuming that the entire [begin, end) range of memory cannot be
2070 read, try to read whatever subrange is possible to read.
2071
2072 The function returns, in RESULT, either zero or one memory block.
2073 If there's a readable subrange at the beginning, it is completely
2074 read and returned. Any further readable subrange will not be read.
2075 Otherwise, if there's a readable subrange at the end, it will be
2076 completely read and returned. Any readable subranges before it
2077 (obviously, not starting at the beginning) will be ignored. In
2078 other cases -- either no readable subrange, or readable subrange(s)
2079 that are neither at the beginning nor at the end -- nothing is returned.
2080
2081 The purpose of this function is to handle a read across a boundary
2082 of accessible memory in a case when memory map is not available.
2083 The above restrictions are fine for this case, but will give
2084 incorrect results if the memory is 'patchy'. However, supporting
2085 'patchy' memory would require trying to read every single byte,
2086 and that seems an unacceptable solution. An explicit memory map is
2087 recommended for this case -- and target_read_memory_robust will
2088 take care of reading multiple ranges then. */
2089
2090 static void
2091 read_whatever_is_readable (struct target_ops *ops,
2092 ULONGEST begin, ULONGEST end,
2093 VEC(memory_read_result_s) **result)
2094 {
2095 gdb_byte *buf = xmalloc (end - begin);
2096 ULONGEST current_begin = begin;
2097 ULONGEST current_end = end;
2098 int forward;
2099 memory_read_result_s r;
2100
2101 /* If we previously failed to read 1 byte, nothing can be done here. */
2102 if (end - begin <= 1)
2103 {
2104 xfree (buf);
2105 return;
2106 }
2107
2108 /* Check that either the first or the last byte is readable, and give up
2109 if not. This heuristic is meant to permit reading accessible memory
2110 at the boundary of an accessible region. */
2111 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2112 buf, begin, 1) == 1)
2113 {
2114 forward = 1;
2115 ++current_begin;
2116 }
2117 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2118 buf + (end-begin) - 1, end - 1, 1) == 1)
2119 {
2120 forward = 0;
2121 --current_end;
2122 }
2123 else
2124 {
2125 xfree (buf);
2126 return;
2127 }
2128
2129 /* The loop invariant is that the [current_begin, current_end) range was
2130 previously found to be not readable as a whole.
2131
2132 Note the loop condition -- if the range has only 1 byte, we can't divide
2133 it any further, so there's no point in trying. */
2134 while (current_end - current_begin > 1)
2135 {
2136 ULONGEST first_half_begin, first_half_end;
2137 ULONGEST second_half_begin, second_half_end;
2138 LONGEST xfer;
2139 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2140
2141 if (forward)
2142 {
2143 first_half_begin = current_begin;
2144 first_half_end = middle;
2145 second_half_begin = middle;
2146 second_half_end = current_end;
2147 }
2148 else
2149 {
2150 first_half_begin = middle;
2151 first_half_end = current_end;
2152 second_half_begin = current_begin;
2153 second_half_end = middle;
2154 }
2155
2156 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2157 buf + (first_half_begin - begin),
2158 first_half_begin,
2159 first_half_end - first_half_begin);
2160
2161 if (xfer == first_half_end - first_half_begin)
2162 {
2163 /* This half reads up fine. So, the error must be in the
2164 other half. */
2165 current_begin = second_half_begin;
2166 current_end = second_half_end;
2167 }
2168 else
2169 {
2170 /* This half is not readable. Because we've tried one byte, we
2171 know some part of this half is actually readable. Go to the next
2172 iteration to divide again and try to read.
2173
2174 We don't handle the other half, because this function only tries
2175 to read a single readable subrange. */
2176 current_begin = first_half_begin;
2177 current_end = first_half_end;
2178 }
2179 }
2180
2181 if (forward)
2182 {
2183 /* The [begin, current_begin) range has been read. */
2184 r.begin = begin;
2185 r.end = current_begin;
2186 r.data = buf;
2187 }
2188 else
2189 {
2190 /* The [current_end, end) range has been read. */
2191 LONGEST rlen = end - current_end;
2192
2193 r.data = xmalloc (rlen);
2194 memcpy (r.data, buf + current_end - begin, rlen);
2195 r.begin = current_end;
2196 r.end = end;
2197 xfree (buf);
2198 }
2199 VEC_safe_push (memory_read_result_s, (*result), &r);
2200 }
2201
2202 void
2203 free_memory_read_result_vector (void *x)
2204 {
2205 VEC(memory_read_result_s) *v = x;
2206 memory_read_result_s *current;
2207 int ix;
2208
2209 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2210 {
2211 xfree (current->data);
2212 }
2213 VEC_free (memory_read_result_s, v);
2214 }
2215
2216 VEC(memory_read_result_s) *
2217 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2218 {
2219 VEC(memory_read_result_s) *result = 0;
2220
2221 LONGEST xfered = 0;
2222 while (xfered < len)
2223 {
2224 struct mem_region *region = lookup_mem_region (offset + xfered);
2225 LONGEST rlen;
2226
2227 /* If there is no explicit region, a fake one should be created. */
2228 gdb_assert (region);
2229
2230 if (region->hi == 0)
2231 rlen = len - xfered;
2232 else
2233 rlen = region->hi - (offset + xfered);
2234
2235 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2236 {
2237 /* Cannot read this region. Note that we can end up here only
2238 if the region is explicitly marked inaccessible, or
2239 'inaccessible-by-default' is in effect. */
2240 xfered += rlen;
2241 }
2242 else
2243 {
2244 LONGEST to_read = min (len - xfered, rlen);
2245 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2246
2247 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2248 (gdb_byte *) buffer,
2249 offset + xfered, to_read);
2250 /* Call an observer, notifying them of the xfer progress? */
2251 if (xfer <= 0)
2252 {
2253 /* Got an error reading full chunk. See if maybe we can read
2254 some subrange. */
2255 xfree (buffer);
2256 read_whatever_is_readable (ops, offset + xfered,
2257 offset + xfered + to_read, &result);
2258 xfered += to_read;
2259 }
2260 else
2261 {
2262 struct memory_read_result r;
2263 r.data = buffer;
2264 r.begin = offset + xfered;
2265 r.end = r.begin + xfer;
2266 VEC_safe_push (memory_read_result_s, result, &r);
2267 xfered += xfer;
2268 }
2269 QUIT;
2270 }
2271 }
2272 return result;
2273 }
2274
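/* Illustrative sketch (not part of the original file): consuming the
   vector produced by read_memory_robust above and releasing it with
   free_memory_read_result_vector.  The address and length are
   hypothetical.  Guarded out so it does not affect the build.  */
#if 0
static void
example_dump_robust_read (ULONGEST addr, LONGEST len)
{
  VEC(memory_read_result_s) *chunks
    = read_memory_robust (current_target.beneath, addr, len);
  struct cleanup *cleanup
    = make_cleanup (free_memory_read_result_vector, chunks);
  memory_read_result_s *chunk;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, chunk); ix++)
    fprintf_unfiltered (gdb_stdlog, "readable: %s..%s\n",
			pulongest (chunk->begin), pulongest (chunk->end));

  do_cleanups (cleanup);
}
#endif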
2275
2276 /* An alternative to target_write with progress callbacks. */
2277
2278 LONGEST
2279 target_write_with_progress (struct target_ops *ops,
2280 enum target_object object,
2281 const char *annex, const gdb_byte *buf,
2282 ULONGEST offset, LONGEST len,
2283 void (*progress) (ULONGEST, void *), void *baton)
2284 {
2285 LONGEST xfered = 0;
2286
2287 /* Give the progress callback a chance to set up. */
2288 if (progress)
2289 (*progress) (0, baton);
2290
2291 while (xfered < len)
2292 {
2293 LONGEST xfer = target_write_partial (ops, object, annex,
2294 (gdb_byte *) buf + xfered,
2295 offset + xfered, len - xfered);
2296
2297 if (xfer == 0)
2298 return xfered;
2299 if (xfer < 0)
2300 return -1;
2301
2302 if (progress)
2303 (*progress) (xfer, baton);
2304
2305 xfered += xfer;
2306 QUIT;
2307 }
2308 return len;
2309 }
2310
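/* Illustrative sketch (not part of the original file): a progress
   callback suitable for target_write_with_progress above.  The use of
   TARGET_OBJECT_FLASH and the addresses are hypothetical; the point
   is only the callback/baton plumbing.  Guarded out so it does not
   affect the build.  */
#if 0
static void
example_progress (ULONGEST bytes, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes;
  fprintf_unfiltered (gdb_stdlog, "wrote %s bytes so far\n",
		      pulongest (*total));
}

static void
example_flash_write (const gdb_byte *buf, ULONGEST addr, LONGEST len)
{
  ULONGEST total = 0;

  if (target_write_with_progress (current_target.beneath,
				  TARGET_OBJECT_FLASH, NULL, buf, addr, len,
				  example_progress, &total) != len)
    error (_("Flash write failed"));
}
#endif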
2311 /* For docs on target_write see target.h. */
2312
2313 LONGEST
2314 target_write (struct target_ops *ops,
2315 enum target_object object,
2316 const char *annex, const gdb_byte *buf,
2317 ULONGEST offset, LONGEST len)
2318 {
2319 return target_write_with_progress (ops, object, annex, buf, offset, len,
2320 NULL, NULL);
2321 }
2322
2323 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2324 the size of the transferred data. PADDING additional bytes are
2325 available in *BUF_P. This is a helper function for
2326 target_read_alloc; see the declaration of that function for more
2327 information. */
2328
2329 static LONGEST
2330 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2331 const char *annex, gdb_byte **buf_p, int padding)
2332 {
2333 size_t buf_alloc, buf_pos;
2334 gdb_byte *buf;
2335 LONGEST n;
2336
2337 /* This function does not have a length parameter; it reads the
2338 entire OBJECT. Also, it doesn't support objects fetched partly
2339 from one target and partly from another (in a different stratum,
2340 e.g. a core file and an executable). Both reasons make it
2341 unsuitable for reading memory. */
2342 gdb_assert (object != TARGET_OBJECT_MEMORY);
2343
2344 /* Start by reading up to 4K at a time. The target will throttle
2345 this number down if necessary. */
2346 buf_alloc = 4096;
2347 buf = xmalloc (buf_alloc);
2348 buf_pos = 0;
2349 while (1)
2350 {
2351 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2352 buf_pos, buf_alloc - buf_pos - padding);
2353 if (n < 0)
2354 {
2355 /* An error occurred. */
2356 xfree (buf);
2357 return -1;
2358 }
2359 else if (n == 0)
2360 {
2361 /* Read all there was. */
2362 if (buf_pos == 0)
2363 xfree (buf);
2364 else
2365 *buf_p = buf;
2366 return buf_pos;
2367 }
2368
2369 buf_pos += n;
2370
2371 /* If the buffer is filling up, expand it. */
2372 if (buf_alloc < buf_pos * 2)
2373 {
2374 buf_alloc *= 2;
2375 buf = xrealloc (buf, buf_alloc);
2376 }
2377
2378 QUIT;
2379 }
2380 }
2381
2382 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2383 the size of the transferred data. See the declaration in "target.h"
2384 for more information about the return value. */
2385
2386 LONGEST
2387 target_read_alloc (struct target_ops *ops, enum target_object object,
2388 const char *annex, gdb_byte **buf_p)
2389 {
2390 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2391 }
2392
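/* Illustrative sketch (not part of the original file): using
   target_read_alloc above to slurp a whole object.  TARGET_OBJECT_AUXV
   is just a convenient example of an object whose length is not known
   in advance.  Note that *BUF_P is only set when some data was
   transferred.  Guarded out so it does not affect the build.  */
#if 0
static void
example_read_auxv (void)
{
  gdb_byte *data;
  LONGEST len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
				   NULL, &data);

  if (len < 0)
    error (_("Cannot read the auxiliary vector"));

  fprintf_unfiltered (gdb_stdlog, "auxv is %s bytes long\n", plongest (len));
  if (len > 0)
    xfree (data);
}
#endif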
2393 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2394 returned as a string, allocated using xmalloc. If an error occurs
2395 or the transfer is unsupported, NULL is returned. Empty objects
2396 are returned as allocated but empty strings. A warning is issued
2397 if the result contains any embedded NUL bytes. */
2398
2399 char *
2400 target_read_stralloc (struct target_ops *ops, enum target_object object,
2401 const char *annex)
2402 {
2403 gdb_byte *buffer;
2404 char *bufstr;
2405 LONGEST i, transferred;
2406
2407 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2408 bufstr = (char *) buffer;
2409
2410 if (transferred < 0)
2411 return NULL;
2412
2413 if (transferred == 0)
2414 return xstrdup ("");
2415
2416 bufstr[transferred] = 0;
2417
2418 /* Check for embedded NUL bytes; but allow trailing NULs. */
2419 for (i = strlen (bufstr); i < transferred; i++)
2420 if (bufstr[i] != 0)
2421 {
2422 warning (_("target object %d, annex %s, "
2423 "contained unexpected null characters"),
2424 (int) object, annex ? annex : "(none)");
2425 break;
2426 }
2427
2428 return bufstr;
2429 }
2430
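/* Illustrative sketch (not part of the original file): using
   target_read_stralloc above for an object that is naturally a
   string.  TARGET_OBJECT_LIBRARIES is used here only as an example of
   such an object; the returned string is owned by the caller and must
   be freed.  Guarded out so it does not affect the build.  */
#if 0
static void
example_show_library_list (void)
{
  char *xml = target_read_stralloc (&current_target,
				    TARGET_OBJECT_LIBRARIES, NULL);

  if (xml == NULL)
    return;

  fprintf_unfiltered (gdb_stdlog, "%s\n", xml);
  xfree (xml);
}
#endif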
2431 /* Memory transfer methods. */
2432
2433 void
2434 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2435 LONGEST len)
2436 {
2437 /* This method is used to read from an alternate, non-current
2438 target. This read must bypass the overlay support (as symbols
2439 don't match this target), and GDB's internal cache (wrong cache
2440 for this target). */
2441 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2442 != len)
2443 memory_error (TARGET_XFER_E_IO, addr);
2444 }
2445
2446 ULONGEST
2447 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2448 int len, enum bfd_endian byte_order)
2449 {
2450 gdb_byte buf[sizeof (ULONGEST)];
2451
2452 gdb_assert (len <= sizeof (buf));
2453 get_target_memory (ops, addr, buf, len);
2454 return extract_unsigned_integer (buf, len, byte_order);
2455 }
2456
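/* Illustrative sketch (not part of the original file): reading a
   32-bit big-endian word from an alternate target with
   get_target_memory_unsigned above.  The target and address are
   hypothetical.  Guarded out so it does not affect the build.  */
#if 0
static ULONGEST
example_read_descriptor_word (struct target_ops *alt_target, CORE_ADDR addr)
{
  /* Bypasses overlays and the dcache, as described above.  */
  return get_target_memory_unsigned (alt_target, addr, 4, BFD_ENDIAN_BIG);
}
#endif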
2457 int
2458 target_insert_breakpoint (struct gdbarch *gdbarch,
2459 struct bp_target_info *bp_tgt)
2460 {
2461 if (!may_insert_breakpoints)
2462 {
2463 warning (_("May not insert breakpoints"));
2464 return 1;
2465 }
2466
2467 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2468 }
2469
2470 int
2471 target_remove_breakpoint (struct gdbarch *gdbarch,
2472 struct bp_target_info *bp_tgt)
2473 {
2474 /* This is kind of a weird case to handle, but the permission might
2475 have been changed after breakpoints were inserted - in which case
2476 we should just take the user literally and assume that any
2477 breakpoints should be left in place. */
2478 if (!may_insert_breakpoints)
2479 {
2480 warning (_("May not remove breakpoints"));
2481 return 1;
2482 }
2483
2484 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2485 }
2486
2487 static void
2488 target_info (char *args, int from_tty)
2489 {
2490 struct target_ops *t;
2491 int has_all_mem = 0;
2492
2493 if (symfile_objfile != NULL)
2494 printf_unfiltered (_("Symbols from \"%s\".\n"),
2495 objfile_name (symfile_objfile));
2496
2497 for (t = target_stack; t != NULL; t = t->beneath)
2498 {
2499 if (!(*t->to_has_memory) (t))
2500 continue;
2501
2502 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2503 continue;
2504 if (has_all_mem)
2505 printf_unfiltered (_("\tWhile running this, "
2506 "GDB does not access memory from...\n"));
2507 printf_unfiltered ("%s:\n", t->to_longname);
2508 (t->to_files_info) (t);
2509 has_all_mem = (*t->to_has_all_memory) (t);
2510 }
2511 }
2512
2513 /* This function is called before any new inferior is created, e.g.
2514 by running a program, attaching, or connecting to a target.
2515 It cleans up any state from previous invocations which might
2516 change between runs. This is a subset of what target_preopen
2517 resets (things which might change between targets). */
2518
2519 void
2520 target_pre_inferior (int from_tty)
2521 {
2522 /* Clear out solib state. Otherwise the solib state of the previous
2523 inferior might have survived and is entirely wrong for the new
2524 target. This has been observed on GNU/Linux using glibc 2.3. How
2525 to reproduce:
2526
2527 bash$ ./foo&
2528 [1] 4711
2529 bash$ ./foo&
2530 [1] 4712
2531 bash$ gdb ./foo
2532 [...]
2533 (gdb) attach 4711
2534 (gdb) detach
2535 (gdb) attach 4712
2536 Cannot access memory at address 0xdeadbeef
2537 */
2538
2539 /* In some OSs, the shared library list is the same/global/shared
2540 across inferiors. If code is shared between processes, so are
2541 memory regions and features. */
2542 if (!gdbarch_has_global_solist (target_gdbarch ()))
2543 {
2544 no_shared_libraries (NULL, from_tty);
2545
2546 invalidate_target_mem_regions ();
2547
2548 target_clear_description ();
2549 }
2550
2551 agent_capability_invalidate ();
2552 }
2553
2554 /* Callback for iterate_over_inferiors. Gets rid of the given
2555 inferior. */
2556
2557 static int
2558 dispose_inferior (struct inferior *inf, void *args)
2559 {
2560 struct thread_info *thread;
2561
2562 thread = any_thread_of_process (inf->pid);
2563 if (thread)
2564 {
2565 switch_to_thread (thread->ptid);
2566
2567 /* Core inferiors actually should be detached, not killed. */
2568 if (target_has_execution)
2569 target_kill ();
2570 else
2571 target_detach (NULL, 0);
2572 }
2573
2574 return 0;
2575 }
2576
2577 /* This is to be called by the open routine before it does
2578 anything. */
2579
2580 void
2581 target_preopen (int from_tty)
2582 {
2583 dont_repeat ();
2584
2585 if (have_inferiors ())
2586 {
2587 if (!from_tty
2588 || !have_live_inferiors ()
2589 || query (_("A program is being debugged already. Kill it? ")))
2590 iterate_over_inferiors (dispose_inferior, NULL);
2591 else
2592 error (_("Program not killed."));
2593 }
2594
2595 /* Calling target_kill may remove the target from the stack. But if
2596 it doesn't (which seems like a win for UDI), remove it now. */
2597 /* Leave the exec target, though. The user may be switching from a
2598 live process to a core of the same program. */
2599 pop_all_targets_above (file_stratum);
2600
2601 target_pre_inferior (from_tty);
2602 }
2603
2604 /* Detach a target after doing deferred register stores. */
2605
2606 void
2607 target_detach (const char *args, int from_tty)
2608 {
2609 struct target_ops* t;
2610
2611 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2612 /* Don't remove global breakpoints here. They're removed on
2613 disconnection from the target. */
2614 ;
2615 else
2616 /* If we're in breakpoints-always-inserted mode, have to remove
2617 them before detaching. */
2618 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2619
2620 prepare_for_detach ();
2621
2622 for (t = current_target.beneath; t != NULL; t = t->beneath)
2623 {
2624 if (t->to_detach != NULL)
2625 {
2626 t->to_detach (t, args, from_tty);
2627 if (targetdebug)
2628 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2629 args, from_tty);
2630 return;
2631 }
2632 }
2633
2634 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2635 }
2636
2637 void
2638 target_disconnect (char *args, int from_tty)
2639 {
2640 struct target_ops *t;
2641
2642 /* If we're in breakpoints-always-inserted mode or if breakpoints
2643 are global across processes, we have to remove them before
2644 disconnecting. */
2645 remove_breakpoints ();
2646
2647 for (t = current_target.beneath; t != NULL; t = t->beneath)
2648 if (t->to_disconnect != NULL)
2649 {
2650 if (targetdebug)
2651 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2652 args, from_tty);
2653 t->to_disconnect (t, args, from_tty);
2654 return;
2655 }
2656
2657 tcomplain ();
2658 }
2659
2660 ptid_t
2661 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2662 {
2663 struct target_ops *t;
2664
2665 for (t = current_target.beneath; t != NULL; t = t->beneath)
2666 {
2667 if (t->to_wait != NULL)
2668 {
2669 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2670
2671 if (targetdebug)
2672 {
2673 char *status_string;
2674 char *options_string;
2675
2676 status_string = target_waitstatus_to_string (status);
2677 options_string = target_options_to_string (options);
2678 fprintf_unfiltered (gdb_stdlog,
2679 "target_wait (%d, status, options={%s})"
2680 " = %d, %s\n",
2681 ptid_get_pid (ptid), options_string,
2682 ptid_get_pid (retval), status_string);
2683 xfree (status_string);
2684 xfree (options_string);
2685 }
2686
2687 return retval;
2688 }
2689 }
2690
2691 noprocess ();
2692 }
2693
2694 char *
2695 target_pid_to_str (ptid_t ptid)
2696 {
2697 struct target_ops *t;
2698
2699 for (t = current_target.beneath; t != NULL; t = t->beneath)
2700 {
2701 if (t->to_pid_to_str != NULL)
2702 return (*t->to_pid_to_str) (t, ptid);
2703 }
2704
2705 return normal_pid_to_str (ptid);
2706 }
2707
2708 char *
2709 target_thread_name (struct thread_info *info)
2710 {
2711 struct target_ops *t;
2712
2713 for (t = current_target.beneath; t != NULL; t = t->beneath)
2714 {
2715 if (t->to_thread_name != NULL)
2716 return (*t->to_thread_name) (info);
2717 }
2718
2719 return NULL;
2720 }
2721
2722 void
2723 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2724 {
2725 struct target_ops *t;
2726
2727 target_dcache_invalidate ();
2728
2729 for (t = current_target.beneath; t != NULL; t = t->beneath)
2730 {
2731 if (t->to_resume != NULL)
2732 {
2733 t->to_resume (t, ptid, step, signal);
2734 if (targetdebug)
2735 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2736 ptid_get_pid (ptid),
2737 step ? "step" : "continue",
2738 gdb_signal_to_name (signal));
2739
2740 registers_changed_ptid (ptid);
2741 set_executing (ptid, 1);
2742 set_running (ptid, 1);
2743 clear_inline_frame_state (ptid);
2744 return;
2745 }
2746 }
2747
2748 noprocess ();
2749 }
2750
2751 void
2752 target_pass_signals (int numsigs, unsigned char *pass_signals)
2753 {
2754 struct target_ops *t;
2755
2756 for (t = current_target.beneath; t != NULL; t = t->beneath)
2757 {
2758 if (t->to_pass_signals != NULL)
2759 {
2760 if (targetdebug)
2761 {
2762 int i;
2763
2764 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2765 numsigs);
2766
2767 for (i = 0; i < numsigs; i++)
2768 if (pass_signals[i])
2769 fprintf_unfiltered (gdb_stdlog, " %s",
2770 gdb_signal_to_name (i));
2771
2772 fprintf_unfiltered (gdb_stdlog, " })\n");
2773 }
2774
2775 (*t->to_pass_signals) (numsigs, pass_signals);
2776 return;
2777 }
2778 }
2779 }
2780
2781 void
2782 target_program_signals (int numsigs, unsigned char *program_signals)
2783 {
2784 struct target_ops *t;
2785
2786 for (t = current_target.beneath; t != NULL; t = t->beneath)
2787 {
2788 if (t->to_program_signals != NULL)
2789 {
2790 if (targetdebug)
2791 {
2792 int i;
2793
2794 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2795 numsigs);
2796
2797 for (i = 0; i < numsigs; i++)
2798 if (program_signals[i])
2799 fprintf_unfiltered (gdb_stdlog, " %s",
2800 gdb_signal_to_name (i));
2801
2802 fprintf_unfiltered (gdb_stdlog, " })\n");
2803 }
2804
2805 (*t->to_program_signals) (numsigs, program_signals);
2806 return;
2807 }
2808 }
2809 }
2810
2811 /* Look through the list of possible targets for a target that can
2812 follow forks. */
2813
2814 int
2815 target_follow_fork (int follow_child, int detach_fork)
2816 {
2817 struct target_ops *t;
2818
2819 for (t = current_target.beneath; t != NULL; t = t->beneath)
2820 {
2821 if (t->to_follow_fork != NULL)
2822 {
2823 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2824
2825 if (targetdebug)
2826 fprintf_unfiltered (gdb_stdlog,
2827 "target_follow_fork (%d, %d) = %d\n",
2828 follow_child, detach_fork, retval);
2829 return retval;
2830 }
2831 }
2832
2833 /* Some target returned a fork event, but did not know how to follow it. */
2834 internal_error (__FILE__, __LINE__,
2835 _("could not find a target to follow fork"));
2836 }
2837
2838 void
2839 target_mourn_inferior (void)
2840 {
2841 struct target_ops *t;
2842
2843 for (t = current_target.beneath; t != NULL; t = t->beneath)
2844 {
2845 if (t->to_mourn_inferior != NULL)
2846 {
2847 t->to_mourn_inferior (t);
2848 if (targetdebug)
2849 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2850
2851 /* We no longer need to keep handles on any of the object files.
2852 Make sure to release them to avoid unnecessarily locking any
2853 of them while we're not actually debugging. */
2854 bfd_cache_close_all ();
2855
2856 return;
2857 }
2858 }
2859
2860 internal_error (__FILE__, __LINE__,
2861 _("could not find a target to mourn the inferior"));
2862 }
2863
2864 /* Look for a target which can describe architectural features, starting
2865 from TARGET. If we find one, return its description. */
2866
2867 const struct target_desc *
2868 target_read_description (struct target_ops *target)
2869 {
2870 struct target_ops *t;
2871
2872 for (t = target; t != NULL; t = t->beneath)
2873 if (t->to_read_description != NULL)
2874 {
2875 const struct target_desc *tdesc;
2876
2877 tdesc = t->to_read_description (t);
2878 if (tdesc)
2879 return tdesc;
2880 }
2881
2882 return NULL;
2883 }
2884
2885 /* The default implementation of to_search_memory.
2886 This implements a basic search of memory, reading target memory and
2887 performing the search here (as opposed to performing the search in on the
2888 target side with, for example, gdbserver). */
2889
2890 int
2891 simple_search_memory (struct target_ops *ops,
2892 CORE_ADDR start_addr, ULONGEST search_space_len,
2893 const gdb_byte *pattern, ULONGEST pattern_len,
2894 CORE_ADDR *found_addrp)
2895 {
2896 /* NOTE: also defined in find.c testcase. */
2897 #define SEARCH_CHUNK_SIZE 16000
2898 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2899 /* Buffer to hold memory contents for searching. */
2900 gdb_byte *search_buf;
2901 unsigned search_buf_size;
2902 struct cleanup *old_cleanups;
2903
2904 search_buf_size = chunk_size + pattern_len - 1;
2905
2906 /* No point in trying to allocate a buffer larger than the search space. */
2907 if (search_space_len < search_buf_size)
2908 search_buf_size = search_space_len;
2909
2910 search_buf = malloc (search_buf_size);
2911 if (search_buf == NULL)
2912 error (_("Unable to allocate memory to perform the search."));
2913 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2914
2915 /* Prime the search buffer. */
2916
2917 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2918 search_buf, start_addr, search_buf_size) != search_buf_size)
2919 {
2920 warning (_("Unable to access %s bytes of target "
2921 "memory at %s, halting search."),
2922 pulongest (search_buf_size), hex_string (start_addr));
2923 do_cleanups (old_cleanups);
2924 return -1;
2925 }
2926
2927 /* Perform the search.
2928
2929 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2930 When we've scanned N bytes we copy the trailing bytes to the start and
2931 read in another N bytes. */
2932
2933 while (search_space_len >= pattern_len)
2934 {
2935 gdb_byte *found_ptr;
2936 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2937
2938 found_ptr = memmem (search_buf, nr_search_bytes,
2939 pattern, pattern_len);
2940
2941 if (found_ptr != NULL)
2942 {
2943 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2944
2945 *found_addrp = found_addr;
2946 do_cleanups (old_cleanups);
2947 return 1;
2948 }
2949
2950 /* Not found in this chunk, skip to next chunk. */
2951
2952 /* Don't let search_space_len wrap here, it's unsigned. */
2953 if (search_space_len >= chunk_size)
2954 search_space_len -= chunk_size;
2955 else
2956 search_space_len = 0;
2957
2958 if (search_space_len >= pattern_len)
2959 {
2960 unsigned keep_len = search_buf_size - chunk_size;
2961 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2962 int nr_to_read;
2963
2964 /* Copy the trailing part of the previous iteration to the front
2965 of the buffer for the next iteration. */
2966 gdb_assert (keep_len == pattern_len - 1);
2967 memcpy (search_buf, search_buf + chunk_size, keep_len);
2968
2969 nr_to_read = min (search_space_len - keep_len, chunk_size);
2970
2971 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2972 search_buf + keep_len, read_addr,
2973 nr_to_read) != nr_to_read)
2974 {
2975 warning (_("Unable to access %s bytes of target "
2976 "memory at %s, halting search."),
2977 plongest (nr_to_read),
2978 hex_string (read_addr));
2979 do_cleanups (old_cleanups);
2980 return -1;
2981 }
2982
2983 start_addr += chunk_size;
2984 }
2985 }
2986
2987 /* Not found. */
2988
2989 do_cleanups (old_cleanups);
2990 return 0;
2991 }
2992
2993 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2994 sequence of bytes in PATTERN with length PATTERN_LEN.
2995
2996 The result is 1 if found, 0 if not found, and -1 if there was an error
2997 requiring halting of the search (e.g. memory read error).
2998 If the pattern is found the address is recorded in FOUND_ADDRP. */
2999
3000 int
3001 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3002 const gdb_byte *pattern, ULONGEST pattern_len,
3003 CORE_ADDR *found_addrp)
3004 {
3005 struct target_ops *t;
3006 int found;
3007
3008 /* We don't use INHERIT to set current_target.to_search_memory,
3009 so we have to scan the target stack and handle targetdebug
3010 ourselves. */
3011
3012 if (targetdebug)
3013 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3014 hex_string (start_addr));
3015
3016 for (t = current_target.beneath; t != NULL; t = t->beneath)
3017 if (t->to_search_memory != NULL)
3018 break;
3019
3020 if (t != NULL)
3021 {
3022 found = t->to_search_memory (t, start_addr, search_space_len,
3023 pattern, pattern_len, found_addrp);
3024 }
3025 else
3026 {
3027 /* If a special version of to_search_memory isn't available, use the
3028 simple version. */
3029 found = simple_search_memory (current_target.beneath,
3030 start_addr, search_space_len,
3031 pattern, pattern_len, found_addrp);
3032 }
3033
3034 if (targetdebug)
3035 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3036
3037 return found;
3038 }
3039
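/* Illustrative sketch (not part of the original file): calling
   target_search_memory above to look for a fixed byte pattern.  The
   pattern and the search bounds are hypothetical.  Guarded out so it
   does not affect the build.  */
#if 0
static void
example_find_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0x7f, 'E', 'L', 'F' };
  CORE_ADDR found_addr;
  int found = target_search_memory (start, space_len,
				    magic, sizeof (magic), &found_addr);

  if (found < 0)
    error (_("Search failed"));
  else if (found == 0)
    fprintf_unfiltered (gdb_stdlog, "pattern not found\n");
  else
    fprintf_unfiltered (gdb_stdlog, "pattern found at %s\n",
			hex_string (found_addr));
}
#endif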
3040 /* Look through the currently pushed targets. If none of them will
3041 be able to restart the currently running process, issue an error
3042 message. */
3043
3044 void
3045 target_require_runnable (void)
3046 {
3047 struct target_ops *t;
3048
3049 for (t = target_stack; t != NULL; t = t->beneath)
3050 {
3051 /* If this target knows how to create a new program, then
3052 assume we will still be able to after killing the current
3053 one. Either killing and mourning will not pop T, or else
3054 find_default_run_target will find it again. */
3055 if (t->to_create_inferior != NULL)
3056 return;
3057
3058 /* Do not worry about thread_stratum targets that cannot
3059 create inferiors. Assume they will be pushed again if
3060 necessary, and continue to the process_stratum. */
3061 if (t->to_stratum == thread_stratum
3062 || t->to_stratum == arch_stratum)
3063 continue;
3064
3065 error (_("The \"%s\" target does not support \"run\". "
3066 "Try \"help target\" or \"continue\"."),
3067 t->to_shortname);
3068 }
3069
3070 /* This function is only called if the target is running. In that
3071 case there should have been a process_stratum target and it
3072 should either know how to create inferiors, or not... */
3073 internal_error (__FILE__, __LINE__, _("No targets found"));
3074 }
3075
3076 /* Look through the list of possible targets for a target that can
3077 execute a run or attach command without any other data. This is
3078 used to locate the default process stratum.
3079
3080 If DO_MESG is not NULL, the result is always valid (error() is
3081 called for errors); else, return NULL on error. */
3082
3083 static struct target_ops *
3084 find_default_run_target (char *do_mesg)
3085 {
3086 struct target_ops **t;
3087 struct target_ops *runable = NULL;
3088 int count;
3089
3090 count = 0;
3091
3092 for (t = target_structs; t < target_structs + target_struct_size;
3093 ++t)
3094 {
3095 if ((*t)->to_can_run && target_can_run (*t))
3096 {
3097 runable = *t;
3098 ++count;
3099 }
3100 }
3101
3102 if (count != 1)
3103 {
3104 if (do_mesg)
3105 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3106 else
3107 return NULL;
3108 }
3109
3110 return runable;
3111 }
3112
3113 void
3114 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3115 {
3116 struct target_ops *t;
3117
3118 t = find_default_run_target ("attach");
3119 (t->to_attach) (t, args, from_tty);
3120 return;
3121 }
3122
3123 void
3124 find_default_create_inferior (struct target_ops *ops,
3125 char *exec_file, char *allargs, char **env,
3126 int from_tty)
3127 {
3128 struct target_ops *t;
3129
3130 t = find_default_run_target ("run");
3131 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3132 return;
3133 }
3134
3135 static int
3136 find_default_can_async_p (void)
3137 {
3138 struct target_ops *t;
3139
3140 /* This may be called before the target is pushed on the stack;
3141 look for the default process stratum. If there's none, gdb isn't
3142 configured with a native debugger, and target remote isn't
3143 connected yet. */
3144 t = find_default_run_target (NULL);
3145 if (t && t->to_can_async_p)
3146 return (t->to_can_async_p) ();
3147 return 0;
3148 }
3149
3150 static int
3151 find_default_is_async_p (void)
3152 {
3153 struct target_ops *t;
3154
3155 /* This may be called before the target is pushed on the stack;
3156 look for the default process stratum. If there's none, gdb isn't
3157 configured with a native debugger, and target remote isn't
3158 connected yet. */
3159 t = find_default_run_target (NULL);
3160 if (t && t->to_is_async_p)
3161 return (t->to_is_async_p) ();
3162 return 0;
3163 }
3164
3165 static int
3166 find_default_supports_non_stop (void)
3167 {
3168 struct target_ops *t;
3169
3170 t = find_default_run_target (NULL);
3171 if (t && t->to_supports_non_stop)
3172 return (t->to_supports_non_stop) ();
3173 return 0;
3174 }
3175
3176 int
3177 target_supports_non_stop (void)
3178 {
3179 struct target_ops *t;
3180
3181 for (t = &current_target; t != NULL; t = t->beneath)
3182 if (t->to_supports_non_stop)
3183 return t->to_supports_non_stop ();
3184
3185 return 0;
3186 }
3187
3188 /* Implement the "info proc" command. */
3189
3190 int
3191 target_info_proc (char *args, enum info_proc_what what)
3192 {
3193 struct target_ops *t;
3194
3195 /* If we're already connected to something that can get us OS
3196 related data, use it. Otherwise, try using the native
3197 target. */
3198 if (current_target.to_stratum >= process_stratum)
3199 t = current_target.beneath;
3200 else
3201 t = find_default_run_target (NULL);
3202
3203 for (; t != NULL; t = t->beneath)
3204 {
3205 if (t->to_info_proc != NULL)
3206 {
3207 t->to_info_proc (t, args, what);
3208
3209 if (targetdebug)
3210 fprintf_unfiltered (gdb_stdlog,
3211 "target_info_proc (\"%s\", %d)\n", args, what);
3212
3213 return 1;
3214 }
3215 }
3216
3217 return 0;
3218 }
3219
3220 static int
3221 find_default_supports_disable_randomization (void)
3222 {
3223 struct target_ops *t;
3224
3225 t = find_default_run_target (NULL);
3226 if (t && t->to_supports_disable_randomization)
3227 return (t->to_supports_disable_randomization) ();
3228 return 0;
3229 }
3230
3231 int
3232 target_supports_disable_randomization (void)
3233 {
3234 struct target_ops *t;
3235
3236 for (t = &current_target; t != NULL; t = t->beneath)
3237 if (t->to_supports_disable_randomization)
3238 return t->to_supports_disable_randomization ();
3239
3240 return 0;
3241 }
3242
3243 char *
3244 target_get_osdata (const char *type)
3245 {
3246 struct target_ops *t;
3247
3248 /* If we're already connected to something that can get us OS
3249 related data, use it. Otherwise, try using the native
3250 target. */
3251 if (current_target.to_stratum >= process_stratum)
3252 t = current_target.beneath;
3253 else
3254 t = find_default_run_target ("get OS data");
3255
3256 if (!t)
3257 return NULL;
3258
3259 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3260 }
3261
3262 /* Determine the current address space of thread PTID. */
3263
3264 struct address_space *
3265 target_thread_address_space (ptid_t ptid)
3266 {
3267 struct address_space *aspace;
3268 struct inferior *inf;
3269 struct target_ops *t;
3270
3271 for (t = current_target.beneath; t != NULL; t = t->beneath)
3272 {
3273 if (t->to_thread_address_space != NULL)
3274 {
3275 aspace = t->to_thread_address_space (t, ptid);
3276 gdb_assert (aspace);
3277
3278 if (targetdebug)
3279 fprintf_unfiltered (gdb_stdlog,
3280 "target_thread_address_space (%s) = %d\n",
3281 target_pid_to_str (ptid),
3282 address_space_num (aspace));
3283 return aspace;
3284 }
3285 }
3286
3287 /* Fall-back to the "main" address space of the inferior. */
3288 inf = find_inferior_pid (ptid_get_pid (ptid));
3289
3290 if (inf == NULL || inf->aspace == NULL)
3291 internal_error (__FILE__, __LINE__,
3292 _("Can't determine the current "
3293 "address space of thread %s\n"),
3294 target_pid_to_str (ptid));
3295
3296 return inf->aspace;
3297 }
3298
3299
3300 /* Target file operations. */
3301
3302 static struct target_ops *
3303 default_fileio_target (void)
3304 {
3305 /* If we're already connected to something that can perform
3306 file I/O, use it. Otherwise, try using the native target. */
3307 if (current_target.to_stratum >= process_stratum)
3308 return current_target.beneath;
3309 else
3310 return find_default_run_target ("file I/O");
3311 }
3312
3313 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3314 target file descriptor, or -1 if an error occurs (and set
3315 *TARGET_ERRNO). */
3316 int
3317 target_fileio_open (const char *filename, int flags, int mode,
3318 int *target_errno)
3319 {
3320 struct target_ops *t;
3321
3322 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3323 {
3324 if (t->to_fileio_open != NULL)
3325 {
3326 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3327
3328 if (targetdebug)
3329 fprintf_unfiltered (gdb_stdlog,
3330 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3331 filename, flags, mode,
3332 fd, fd != -1 ? 0 : *target_errno);
3333 return fd;
3334 }
3335 }
3336
3337 *target_errno = FILEIO_ENOSYS;
3338 return -1;
3339 }
3340
3341 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3342 Return the number of bytes written, or -1 if an error occurs
3343 (and set *TARGET_ERRNO). */
3344 int
3345 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3346 ULONGEST offset, int *target_errno)
3347 {
3348 struct target_ops *t;
3349
3350 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3351 {
3352 if (t->to_fileio_pwrite != NULL)
3353 {
3354 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3355 target_errno);
3356
3357 if (targetdebug)
3358 fprintf_unfiltered (gdb_stdlog,
3359 "target_fileio_pwrite (%d,...,%d,%s) "
3360 "= %d (%d)\n",
3361 fd, len, pulongest (offset),
3362 ret, ret != -1 ? 0 : *target_errno);
3363 return ret;
3364 }
3365 }
3366
3367 *target_errno = FILEIO_ENOSYS;
3368 return -1;
3369 }
3370
3371 /* Read up to LEN bytes from FD on the target into READ_BUF.
3372 Return the number of bytes read, or -1 if an error occurs
3373 (and set *TARGET_ERRNO). */
3374 int
3375 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3376 ULONGEST offset, int *target_errno)
3377 {
3378 struct target_ops *t;
3379
3380 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3381 {
3382 if (t->to_fileio_pread != NULL)
3383 {
3384 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3385 target_errno);
3386
3387 if (targetdebug)
3388 fprintf_unfiltered (gdb_stdlog,
3389 "target_fileio_pread (%d,...,%d,%s) "
3390 "= %d (%d)\n",
3391 fd, len, pulongest (offset),
3392 ret, ret != -1 ? 0 : *target_errno);
3393 return ret;
3394 }
3395 }
3396
3397 *target_errno = FILEIO_ENOSYS;
3398 return -1;
3399 }
3400
3401 /* Close FD on the target. Return 0, or -1 if an error occurs
3402 (and set *TARGET_ERRNO). */
3403 int
3404 target_fileio_close (int fd, int *target_errno)
3405 {
3406 struct target_ops *t;
3407
3408 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3409 {
3410 if (t->to_fileio_close != NULL)
3411 {
3412 int ret = t->to_fileio_close (fd, target_errno);
3413
3414 if (targetdebug)
3415 fprintf_unfiltered (gdb_stdlog,
3416 "target_fileio_close (%d) = %d (%d)\n",
3417 fd, ret, ret != -1 ? 0 : *target_errno);
3418 return ret;
3419 }
3420 }
3421
3422 *target_errno = FILEIO_ENOSYS;
3423 return -1;
3424 }
3425
3426 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3427 occurs (and set *TARGET_ERRNO). */
3428 int
3429 target_fileio_unlink (const char *filename, int *target_errno)
3430 {
3431 struct target_ops *t;
3432
3433 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3434 {
3435 if (t->to_fileio_unlink != NULL)
3436 {
3437 int ret = t->to_fileio_unlink (filename, target_errno);
3438
3439 if (targetdebug)
3440 fprintf_unfiltered (gdb_stdlog,
3441 "target_fileio_unlink (%s) = %d (%d)\n",
3442 filename, ret, ret != -1 ? 0 : *target_errno);
3443 return ret;
3444 }
3445 }
3446
3447 *target_errno = FILEIO_ENOSYS;
3448 return -1;
3449 }
3450
3451 /* Read value of symbolic link FILENAME on the target. Return a
3452 null-terminated string allocated via xmalloc, or NULL if an error
3453 occurs (and set *TARGET_ERRNO). */
3454 char *
3455 target_fileio_readlink (const char *filename, int *target_errno)
3456 {
3457 struct target_ops *t;
3458
3459 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3460 {
3461 if (t->to_fileio_readlink != NULL)
3462 {
3463 char *ret = t->to_fileio_readlink (filename, target_errno);
3464
3465 if (targetdebug)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "target_fileio_readlink (%s) = %s (%d)\n",
3468 filename, ret? ret : "(nil)",
3469 ret? 0 : *target_errno);
3470 return ret;
3471 }
3472 }
3473
3474 *target_errno = FILEIO_ENOSYS;
3475 return NULL;
3476 }
3477
3478 static void
3479 target_fileio_close_cleanup (void *opaque)
3480 {
3481 int fd = *(int *) opaque;
3482 int target_errno;
3483
3484 target_fileio_close (fd, &target_errno);
3485 }
3486
3487 /* Read target file FILENAME. Store the result in *BUF_P and
3488 return the size of the transferred data. PADDING additional bytes are
3489 available in *BUF_P. This is a helper function for
3490 target_fileio_read_alloc; see the declaration of that function for more
3491 information. */
3492
3493 static LONGEST
3494 target_fileio_read_alloc_1 (const char *filename,
3495 gdb_byte **buf_p, int padding)
3496 {
3497 struct cleanup *close_cleanup;
3498 size_t buf_alloc, buf_pos;
3499 gdb_byte *buf;
3500 LONGEST n;
3501 int fd;
3502 int target_errno;
3503
3504 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3505 if (fd == -1)
3506 return -1;
3507
3508 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3509
3510 /* Start by reading up to 4K at a time. The target will throttle
3511 this number down if necessary. */
3512 buf_alloc = 4096;
3513 buf = xmalloc (buf_alloc);
3514 buf_pos = 0;
3515 while (1)
3516 {
3517 n = target_fileio_pread (fd, &buf[buf_pos],
3518 buf_alloc - buf_pos - padding, buf_pos,
3519 &target_errno);
3520 if (n < 0)
3521 {
3522 /* An error occurred. */
3523 do_cleanups (close_cleanup);
3524 xfree (buf);
3525 return -1;
3526 }
3527 else if (n == 0)
3528 {
3529 /* Read all there was. */
3530 do_cleanups (close_cleanup);
3531 if (buf_pos == 0)
3532 xfree (buf);
3533 else
3534 *buf_p = buf;
3535 return buf_pos;
3536 }
3537
3538 buf_pos += n;
3539
3540 /* If the buffer is filling up, expand it. */
3541 if (buf_alloc < buf_pos * 2)
3542 {
3543 buf_alloc *= 2;
3544 buf = xrealloc (buf, buf_alloc);
3545 }
3546
3547 QUIT;
3548 }
3549 }
3550
3551 /* Read target file FILENAME. Store the result in *BUF_P and return
3552 the size of the transferred data. See the declaration in "target.h"
3553 for more information about the return value. */
3554
3555 LONGEST
3556 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3557 {
3558 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3559 }
3560
3561 /* Read target file FILENAME. The result is NUL-terminated and
3562 returned as a string, allocated using xmalloc. If an error occurs
3563 or the transfer is unsupported, NULL is returned. Empty objects
3564 are returned as allocated but empty strings. A warning is issued
3565 if the result contains any embedded NUL bytes. */
3566
3567 char *
3568 target_fileio_read_stralloc (const char *filename)
3569 {
3570 gdb_byte *buffer;
3571 char *bufstr;
3572 LONGEST i, transferred;
3573
3574 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3575 bufstr = (char *) buffer;
3576
3577 if (transferred < 0)
3578 return NULL;
3579
3580 if (transferred == 0)
3581 return xstrdup ("");
3582
3583 bufstr[transferred] = 0;
3584
3585 /* Check for embedded NUL bytes; but allow trailing NULs. */
3586 for (i = strlen (bufstr); i < transferred; i++)
3587 if (bufstr[i] != 0)
3588 {
3589 warning (_("target file %s "
3590 "contained unexpected null characters"),
3591 filename);
3592 break;
3593 }
3594
3595 return bufstr;
3596 }
3597
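/* Illustrative sketch (not part of the original file): using
   target_fileio_read_stralloc above to fetch a text file from the
   target.  The path is hypothetical, and on error the helper simply
   returns NULL as documented.  Guarded out so it does not affect the
   build.  */
#if 0
static void
example_show_target_file (void)
{
  char *contents = target_fileio_read_stralloc ("/proc/version");

  if (contents == NULL)
    return;

  fprintf_unfiltered (gdb_stdlog, "%s\n", contents);
  xfree (contents);
}
#endif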
3598
3599 static int
3600 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3601 {
3602 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3603 }
3604
3605 static int
3606 default_watchpoint_addr_within_range (struct target_ops *target,
3607 CORE_ADDR addr,
3608 CORE_ADDR start, int length)
3609 {
3610 return addr >= start && addr < start + length;
3611 }
3612
3613 static struct gdbarch *
3614 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3615 {
3616 return target_gdbarch ();
3617 }
3618
3619 static int
3620 return_zero (void)
3621 {
3622 return 0;
3623 }
3624
3625 static int
3626 return_one (void)
3627 {
3628 return 1;
3629 }
3630
3631 static int
3632 return_minus_one (void)
3633 {
3634 return -1;
3635 }
3636
3637 /* Find the next target down the stack from the specified target. */
3640
3641 struct target_ops *
3642 find_target_beneath (struct target_ops *t)
3643 {
3644 return t->beneath;
3645 }
3646
3647 \f
3648 /* The inferior process has died. Long live the inferior! */
3649
3650 void
3651 generic_mourn_inferior (void)
3652 {
3653 ptid_t ptid;
3654
3655 ptid = inferior_ptid;
3656 inferior_ptid = null_ptid;
3657
3658 /* Mark breakpoints uninserted in case something tries to delete a
3659 breakpoint while we delete the inferior's threads (which would
3660 fail, since the inferior is long gone). */
3661 mark_breakpoints_out ();
3662
3663 if (!ptid_equal (ptid, null_ptid))
3664 {
3665 int pid = ptid_get_pid (ptid);
3666 exit_inferior (pid);
3667 }
3668
3669 /* Note this wipes step-resume breakpoints, so needs to be done
3670 after exit_inferior, which ends up referencing the step-resume
3671 breakpoints through clear_thread_inferior_resources. */
3672 breakpoint_init_inferior (inf_exited);
3673
3674 registers_changed ();
3675
3676 reopen_exec_file ();
3677 reinit_frame_cache ();
3678
3679 if (deprecated_detach_hook)
3680 deprecated_detach_hook ();
3681 }
3682 \f
3683 /* Convert a normal process ID to a string. Returns the string in a
3684 static buffer. */
3685
3686 char *
3687 normal_pid_to_str (ptid_t ptid)
3688 {
3689 static char buf[32];
3690
3691 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3692 return buf;
3693 }
3694
3695 static char *
3696 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3697 {
3698 return normal_pid_to_str (ptid);
3699 }
3700
3701 /* Error-catcher for target_find_memory_regions. */
3702 static int
3703 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3704 {
3705 error (_("Command not implemented for this target."));
3706 return 0;
3707 }
3708
3709 /* Error-catcher for target_make_corefile_notes. */
3710 static char *
3711 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3712 {
3713 error (_("Command not implemented for this target."));
3714 return NULL;
3715 }
3716
3717 /* Error-catcher for target_get_bookmark. */
3718 static gdb_byte *
3719 dummy_get_bookmark (char *ignore1, int ignore2)
3720 {
3721 tcomplain ();
3722 return NULL;
3723 }
3724
3725 /* Error-catcher for target_goto_bookmark. */
3726 static void
3727 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3728 {
3729 tcomplain ();
3730 }
3731
3732 /* Set up the handful of non-empty slots needed by the dummy target
3733 vector. */
3734
3735 static void
3736 init_dummy_target (void)
3737 {
3738 dummy_target.to_shortname = "None";
3739 dummy_target.to_longname = "None";
3740 dummy_target.to_doc = "";
3741 dummy_target.to_attach = find_default_attach;
3742 dummy_target.to_detach =
3743 (void (*)(struct target_ops *, const char *, int))target_ignore;
3744 dummy_target.to_create_inferior = find_default_create_inferior;
3745 dummy_target.to_can_async_p = find_default_can_async_p;
3746 dummy_target.to_is_async_p = find_default_is_async_p;
3747 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3748 dummy_target.to_supports_disable_randomization
3749 = find_default_supports_disable_randomization;
3750 dummy_target.to_pid_to_str = dummy_pid_to_str;
3751 dummy_target.to_stratum = dummy_stratum;
3752 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3753 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3754 dummy_target.to_get_bookmark = dummy_get_bookmark;
3755 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3756 dummy_target.to_xfer_partial = default_xfer_partial;
3757 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3758 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3759 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3760 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3761 dummy_target.to_has_execution
3762 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3763 dummy_target.to_stopped_by_watchpoint = return_zero;
3764 dummy_target.to_stopped_data_address =
3765 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3766 dummy_target.to_magic = OPS_MAGIC;
3767 }
3768 \f
3769 static void
3770 debug_to_open (char *args, int from_tty)
3771 {
3772 debug_target.to_open (args, from_tty);
3773
3774 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3775 }
3776
3777 void
3778 target_close (struct target_ops *targ)
3779 {
3780 gdb_assert (!target_is_pushed (targ));
3781
3782 if (targ->to_xclose != NULL)
3783 targ->to_xclose (targ);
3784 else if (targ->to_close != NULL)
3785 targ->to_close ();
3786
3787 if (targetdebug)
3788 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3789 }
3790
3791 void
3792 target_attach (char *args, int from_tty)
3793 {
3794 struct target_ops *t;
3795
3796 for (t = current_target.beneath; t != NULL; t = t->beneath)
3797 {
3798 if (t->to_attach != NULL)
3799 {
3800 t->to_attach (t, args, from_tty);
3801 if (targetdebug)
3802 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3803 args, from_tty);
3804 return;
3805 }
3806 }
3807
3808 internal_error (__FILE__, __LINE__,
3809 _("could not find a target to attach"));
3810 }
3811
3812 int
3813 target_thread_alive (ptid_t ptid)
3814 {
3815 struct target_ops *t;
3816
3817 for (t = current_target.beneath; t != NULL; t = t->beneath)
3818 {
3819 if (t->to_thread_alive != NULL)
3820 {
3821 int retval;
3822
3823 retval = t->to_thread_alive (t, ptid);
3824 if (targetdebug)
3825 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3826 ptid_get_pid (ptid), retval);
3827
3828 return retval;
3829 }
3830 }
3831
3832 return 0;
3833 }
3834
3835 void
3836 target_find_new_threads (void)
3837 {
3838 struct target_ops *t;
3839
3840 for (t = current_target.beneath; t != NULL; t = t->beneath)
3841 {
3842 if (t->to_find_new_threads != NULL)
3843 {
3844 t->to_find_new_threads (t);
3845 if (targetdebug)
3846 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3847
3848 return;
3849 }
3850 }
3851 }
3852
3853 void
3854 target_stop (ptid_t ptid)
3855 {
3856 if (!may_stop)
3857 {
3858 warning (_("May not interrupt or stop the target, ignoring attempt"));
3859 return;
3860 }
3861
3862 (*current_target.to_stop) (ptid);
3863 }
3864
3865 static void
3866 debug_to_post_attach (int pid)
3867 {
3868 debug_target.to_post_attach (pid);
3869
3870 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3871 }
3872
3873 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3874 result. The LIST incoming argument is released. */
3875
3876 static char *
3877 str_comma_list_concat_elem (char *list, const char *elem)
3878 {
3879 if (list == NULL)
3880 return xstrdup (elem);
3881 else
3882 return reconcat (list, list, ", ", elem, (char *) NULL);
3883 }
3884
3885 /* Helper for target_options_to_string. If OPT is present in
3886 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3887 Returns the new resulting string. OPT is removed from
3888 TARGET_OPTIONS. */
3889
3890 static char *
3891 do_option (int *target_options, char *ret,
3892 int opt, char *opt_str)
3893 {
3894 if ((*target_options & opt) != 0)
3895 {
3896 ret = str_comma_list_concat_elem (ret, opt_str);
3897 *target_options &= ~opt;
3898 }
3899
3900 return ret;
3901 }
3902
3903 char *
3904 target_options_to_string (int target_options)
3905 {
3906 char *ret = NULL;
3907
3908 #define DO_TARG_OPTION(OPT) \
3909 ret = do_option (&target_options, ret, OPT, #OPT)
3910
3911 DO_TARG_OPTION (TARGET_WNOHANG);
3912
3913 if (target_options != 0)
3914 ret = str_comma_list_concat_elem (ret, "unknown???");
3915
3916 if (ret == NULL)
3917 ret = xstrdup ("");
3918 return ret;
3919 }
3920
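/* Illustrative sketch (not part of the original file): what the
   helper above produces.  The caller owns the returned string and
   must free it.  Guarded out so it does not affect the build.  */
#if 0
static void
example_show_wait_options (void)
{
  /* Yields "TARGET_WNOHANG"; unknown bits are reported as "unknown???".  */
  char *str = target_options_to_string (TARGET_WNOHANG);

  fprintf_unfiltered (gdb_stdlog, "options: %s\n", str);
  xfree (str);
}
#endif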
3921 static void
3922 debug_print_register (const char * func,
3923 struct regcache *regcache, int regno)
3924 {
3925 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3926
3927 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3928 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3929 && gdbarch_register_name (gdbarch, regno) != NULL
3930 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3931 fprintf_unfiltered (gdb_stdlog, "(%s)",
3932 gdbarch_register_name (gdbarch, regno));
3933 else
3934 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3935 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3936 {
3937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3938 int i, size = register_size (gdbarch, regno);
3939 gdb_byte buf[MAX_REGISTER_SIZE];
3940
3941 regcache_raw_collect (regcache, regno, buf);
3942 fprintf_unfiltered (gdb_stdlog, " = ");
3943 for (i = 0; i < size; i++)
3944 {
3945 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3946 }
3947 if (size <= sizeof (LONGEST))
3948 {
3949 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3950
3951 fprintf_unfiltered (gdb_stdlog, " %s %s",
3952 core_addr_to_string_nz (val), plongest (val));
3953 }
3954 }
3955 fprintf_unfiltered (gdb_stdlog, "\n");
3956 }
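/* With "set debug target" enabled, the helper above produces log lines
   of the form (values purely illustrative, for a hypothetical
   big-endian target whose PC register holds 0x401136):

     target_fetch_registers (pc) = 00401136 0x401136 4198710

   i.e. the register name (or number), the raw bytes in target byte
   order, and, for values that fit in a LONGEST, the same value printed
   as an address and as a decimal integer.  */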
3957
3958 void
3959 target_fetch_registers (struct regcache *regcache, int regno)
3960 {
3961 struct target_ops *t;
3962
3963 for (t = current_target.beneath; t != NULL; t = t->beneath)
3964 {
3965 if (t->to_fetch_registers != NULL)
3966 {
3967 t->to_fetch_registers (t, regcache, regno);
3968 if (targetdebug)
3969 debug_print_register ("target_fetch_registers", regcache, regno);
3970 return;
3971 }
3972 }
3973 }
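/* A minimal sketch of fetching a single register through the entry
   point above (illustration only; assumes a live, selected thread):  */
#if 0
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  target_fetch_registers (regcache, gdbarch_pc_regnum (gdbarch));
#endif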
3974
3975 void
3976 target_store_registers (struct regcache *regcache, int regno)
3977 {
3978 struct target_ops *t;
3979
3980 if (!may_write_registers)
3981 error (_("Writing to registers is not allowed (regno %d)"), regno);
3982
3983 for (t = current_target.beneath; t != NULL; t = t->beneath)
3984 {
3985 if (t->to_store_registers != NULL)
3986 {
3987 t->to_store_registers (t, regcache, regno);
3988 if (targetdebug)
3989 {
3990 debug_print_register ("target_store_registers", regcache, regno);
3991 }
3992 return;
3993 }
3994 }
3995
3996 noprocess ();
3997 }
3998
3999 int
4000 target_core_of_thread (ptid_t ptid)
4001 {
4002 struct target_ops *t;
4003
4004 for (t = current_target.beneath; t != NULL; t = t->beneath)
4005 {
4006 if (t->to_core_of_thread != NULL)
4007 {
4008 int retval = t->to_core_of_thread (t, ptid);
4009
4010 if (targetdebug)
4011 fprintf_unfiltered (gdb_stdlog,
4012 "target_core_of_thread (%d) = %d\n",
4013 ptid_get_pid (ptid), retval);
4014 return retval;
4015 }
4016 }
4017
4018 return -1;
4019 }
4020
4021 int
4022 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4023 {
4024 struct target_ops *t;
4025
4026 for (t = current_target.beneath; t != NULL; t = t->beneath)
4027 {
4028 if (t->to_verify_memory != NULL)
4029 {
4030 int retval = t->to_verify_memory (t, data, memaddr, size);
4031
4032 if (targetdebug)
4033 fprintf_unfiltered (gdb_stdlog,
4034 "target_verify_memory (%s, %s) = %d\n",
4035 paddress (target_gdbarch (), memaddr),
4036 pulongest (size),
4037 retval);
4038 return retval;
4039 }
4040 }
4041
4042 tcomplain ();
4043 }
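/* Sketch of a target_verify_memory call (illustration only; ADDR is a
   hypothetical inferior address).  A return value of 1 means the
   target's memory matched the supplied buffer:  */
#if 0
  static const gdb_byte expected[] = { 0x55, 0x48, 0x89, 0xe5 };

  if (target_verify_memory (expected, addr, sizeof (expected)) == 1)
    printf_filtered ("memory matches\n");
#endif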
4044
4045 /* The documentation for this function is in its prototype declaration in
4046 target.h. */
4047
4048 int
4049 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4050 {
4051 struct target_ops *t;
4052
4053 for (t = current_target.beneath; t != NULL; t = t->beneath)
4054 if (t->to_insert_mask_watchpoint != NULL)
4055 {
4056 int ret;
4057
4058 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4059
4060 if (targetdebug)
4061 fprintf_unfiltered (gdb_stdlog, "\
4062 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4063 core_addr_to_string (addr),
4064 core_addr_to_string (mask), rw, ret);
4065
4066 return ret;
4067 }
4068
4069 return 1;
4070 }
4071
4072 /* The documentation for this function is in its prototype declaration in
4073 target.h. */
4074
4075 int
4076 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4077 {
4078 struct target_ops *t;
4079
4080 for (t = current_target.beneath; t != NULL; t = t->beneath)
4081 if (t->to_remove_mask_watchpoint != NULL)
4082 {
4083 int ret;
4084
4085 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4086
4087 if (targetdebug)
4088 fprintf_unfiltered (gdb_stdlog, "\
4089 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4090 core_addr_to_string (addr),
4091 core_addr_to_string (mask), rw, ret);
4092
4093 return ret;
4094 }
4095
4096 return 1;
4097 }
4098
4099 /* The documentation for this function is in its prototype declaration
4100 in target.h. */
4101
4102 int
4103 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4104 {
4105 struct target_ops *t;
4106
4107 for (t = current_target.beneath; t != NULL; t = t->beneath)
4108 if (t->to_masked_watch_num_registers != NULL)
4109 return t->to_masked_watch_num_registers (t, addr, mask);
4110
4111 return -1;
4112 }
4113
4114 /* The documentation for this function is in its prototype declaration
4115 in target.h. */
4116
4117 int
4118 target_ranged_break_num_registers (void)
4119 {
4120 struct target_ops *t;
4121
4122 for (t = current_target.beneath; t != NULL; t = t->beneath)
4123 if (t->to_ranged_break_num_registers != NULL)
4124 return t->to_ranged_break_num_registers (t);
4125
4126 return -1;
4127 }
4128
4129 /* See target.h. */
4130
4131 int
4132 target_supports_btrace (void)
4133 {
4134 struct target_ops *t;
4135
4136 for (t = current_target.beneath; t != NULL; t = t->beneath)
4137 if (t->to_supports_btrace != NULL)
4138 return t->to_supports_btrace ();
4139
4140 return 0;
4141 }
4142
4143 /* See target.h. */
4144
4145 struct btrace_target_info *
4146 target_enable_btrace (ptid_t ptid)
4147 {
4148 struct target_ops *t;
4149
4150 for (t = current_target.beneath; t != NULL; t = t->beneath)
4151 if (t->to_enable_btrace != NULL)
4152 return t->to_enable_btrace (ptid);
4153
4154 tcomplain ();
4155 return NULL;
4156 }
4157
4158 /* See target.h. */
4159
4160 void
4161 target_disable_btrace (struct btrace_target_info *btinfo)
4162 {
4163 struct target_ops *t;
4164
4165 for (t = current_target.beneath; t != NULL; t = t->beneath)
4166 if (t->to_disable_btrace != NULL)
4167 {
4168 t->to_disable_btrace (btinfo);
4169 return;
4170 }
4171
4172 tcomplain ();
4173 }
4174
4175 /* See target.h. */
4176
4177 void
4178 target_teardown_btrace (struct btrace_target_info *btinfo)
4179 {
4180 struct target_ops *t;
4181
4182 for (t = current_target.beneath; t != NULL; t = t->beneath)
4183 if (t->to_teardown_btrace != NULL)
4184 {
4185 t->to_teardown_btrace (btinfo);
4186 return;
4187 }
4188
4189 tcomplain ();
4190 }
4191
4192 /* See target.h. */
4193
4194 VEC (btrace_block_s) *
4195 target_read_btrace (struct btrace_target_info *btinfo,
4196 enum btrace_read_type type)
4197 {
4198 struct target_ops *t;
4199
4200 for (t = current_target.beneath; t != NULL; t = t->beneath)
4201 if (t->to_read_btrace != NULL)
4202 return t->to_read_btrace (btinfo, type);
4203
4204 tcomplain ();
4205 return NULL;
4206 }
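/* A rough sketch of how the branch-trace entry points above fit
   together (illustration only; error handling omitted, PTID is assumed
   to identify a live thread, and the read type shown is an
   assumption):  */
#if 0
  if (target_supports_btrace ())
    {
      struct btrace_target_info *btinfo = target_enable_btrace (ptid);
      VEC (btrace_block_s) *blocks
        = target_read_btrace (btinfo, BTRACE_READ_NEW);

      /* ... consume BLOCKS ...  */
      target_disable_btrace (btinfo);
    }
#endif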
4207
4208 /* See target.h. */
4209
4210 void
4211 target_stop_recording (void)
4212 {
4213 struct target_ops *t;
4214
4215 for (t = current_target.beneath; t != NULL; t = t->beneath)
4216 if (t->to_stop_recording != NULL)
4217 {
4218 t->to_stop_recording ();
4219 return;
4220 }
4221
4222 /* This is optional. */
4223 }
4224
4225 /* See target.h. */
4226
4227 void
4228 target_info_record (void)
4229 {
4230 struct target_ops *t;
4231
4232 for (t = current_target.beneath; t != NULL; t = t->beneath)
4233 if (t->to_info_record != NULL)
4234 {
4235 t->to_info_record ();
4236 return;
4237 }
4238
4239 tcomplain ();
4240 }
4241
4242 /* See target.h. */
4243
4244 void
4245 target_save_record (const char *filename)
4246 {
4247 struct target_ops *t;
4248
4249 for (t = current_target.beneath; t != NULL; t = t->beneath)
4250 if (t->to_save_record != NULL)
4251 {
4252 t->to_save_record (filename);
4253 return;
4254 }
4255
4256 tcomplain ();
4257 }
4258
4259 /* See target.h. */
4260
4261 int
4262 target_supports_delete_record (void)
4263 {
4264 struct target_ops *t;
4265
4266 for (t = current_target.beneath; t != NULL; t = t->beneath)
4267 if (t->to_delete_record != NULL)
4268 return 1;
4269
4270 return 0;
4271 }
4272
4273 /* See target.h. */
4274
4275 void
4276 target_delete_record (void)
4277 {
4278 struct target_ops *t;
4279
4280 for (t = current_target.beneath; t != NULL; t = t->beneath)
4281 if (t->to_delete_record != NULL)
4282 {
4283 t->to_delete_record ();
4284 return;
4285 }
4286
4287 tcomplain ();
4288 }
4289
4290 /* See target.h. */
4291
4292 int
4293 target_record_is_replaying (void)
4294 {
4295 struct target_ops *t;
4296
4297 for (t = current_target.beneath; t != NULL; t = t->beneath)
4298 if (t->to_record_is_replaying != NULL)
4299 return t->to_record_is_replaying ();
4300
4301 return 0;
4302 }
4303
4304 /* See target.h. */
4305
4306 void
4307 target_goto_record_begin (void)
4308 {
4309 struct target_ops *t;
4310
4311 for (t = current_target.beneath; t != NULL; t = t->beneath)
4312 if (t->to_goto_record_begin != NULL)
4313 {
4314 t->to_goto_record_begin ();
4315 return;
4316 }
4317
4318 tcomplain ();
4319 }
4320
4321 /* See target.h. */
4322
4323 void
4324 target_goto_record_end (void)
4325 {
4326 struct target_ops *t;
4327
4328 for (t = current_target.beneath; t != NULL; t = t->beneath)
4329 if (t->to_goto_record_end != NULL)
4330 {
4331 t->to_goto_record_end ();
4332 return;
4333 }
4334
4335 tcomplain ();
4336 }
4337
4338 /* See target.h. */
4339
4340 void
4341 target_goto_record (ULONGEST insn)
4342 {
4343 struct target_ops *t;
4344
4345 for (t = current_target.beneath; t != NULL; t = t->beneath)
4346 if (t->to_goto_record != NULL)
4347 {
4348 t->to_goto_record (insn);
4349 return;
4350 }
4351
4352 tcomplain ();
4353 }
4354
4355 /* See target.h. */
4356
4357 void
4358 target_insn_history (int size, int flags)
4359 {
4360 struct target_ops *t;
4361
4362 for (t = current_target.beneath; t != NULL; t = t->beneath)
4363 if (t->to_insn_history != NULL)
4364 {
4365 t->to_insn_history (size, flags);
4366 return;
4367 }
4368
4369 tcomplain ();
4370 }
4371
4372 /* See target.h. */
4373
4374 void
4375 target_insn_history_from (ULONGEST from, int size, int flags)
4376 {
4377 struct target_ops *t;
4378
4379 for (t = current_target.beneath; t != NULL; t = t->beneath)
4380 if (t->to_insn_history_from != NULL)
4381 {
4382 t->to_insn_history_from (from, size, flags);
4383 return;
4384 }
4385
4386 tcomplain ();
4387 }
4388
4389 /* See target.h. */
4390
4391 void
4392 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4393 {
4394 struct target_ops *t;
4395
4396 for (t = current_target.beneath; t != NULL; t = t->beneath)
4397 if (t->to_insn_history_range != NULL)
4398 {
4399 t->to_insn_history_range (begin, end, flags);
4400 return;
4401 }
4402
4403 tcomplain ();
4404 }
4405
4406 /* See target.h. */
4407
4408 void
4409 target_call_history (int size, int flags)
4410 {
4411 struct target_ops *t;
4412
4413 for (t = current_target.beneath; t != NULL; t = t->beneath)
4414 if (t->to_call_history != NULL)
4415 {
4416 t->to_call_history (size, flags);
4417 return;
4418 }
4419
4420 tcomplain ();
4421 }
4422
4423 /* See target.h. */
4424
4425 void
4426 target_call_history_from (ULONGEST begin, int size, int flags)
4427 {
4428 struct target_ops *t;
4429
4430 for (t = current_target.beneath; t != NULL; t = t->beneath)
4431 if (t->to_call_history_from != NULL)
4432 {
4433 t->to_call_history_from (begin, size, flags);
4434 return;
4435 }
4436
4437 tcomplain ();
4438 }
4439
4440 /* See target.h. */
4441
4442 void
4443 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4444 {
4445 struct target_ops *t;
4446
4447 for (t = current_target.beneath; t != NULL; t = t->beneath)
4448 if (t->to_call_history_range != NULL)
4449 {
4450 t->to_call_history_range (begin, end, flags);
4451 return;
4452 }
4453
4454 tcomplain ();
4455 }
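/* The record entry points above back the CLI's "record" family of
   commands: target_insn_history* implement "record instruction-history"
   and target_call_history* implement "record function-call-history",
   while the goto/stop/delete hooks support "record goto", "record stop"
   and "record delete".  */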
4456
4457 static void
4458 debug_to_prepare_to_store (struct regcache *regcache)
4459 {
4460 debug_target.to_prepare_to_store (regcache);
4461
4462 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4463 }
4464
4465 static int
4466 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4467 int write, struct mem_attrib *attrib,
4468 struct target_ops *target)
4469 {
4470 int retval;
4471
4472 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4473 attrib, target);
4474
4475 fprintf_unfiltered (gdb_stdlog,
4476 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4477 paddress (target_gdbarch (), memaddr), len,
4478 write ? "write" : "read", retval);
4479
4480 if (retval > 0)
4481 {
4482 int i;
4483
4484 fputs_unfiltered (", bytes =", gdb_stdlog);
4485 for (i = 0; i < retval; i++)
4486 {
4487 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4488 {
4489 if (targetdebug < 2 && i > 0)
4490 {
4491 fprintf_unfiltered (gdb_stdlog, " ...");
4492 break;
4493 }
4494 fprintf_unfiltered (gdb_stdlog, "\n");
4495 }
4496
4497 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4498 }
4499 }
4500
4501 fputc_unfiltered ('\n', gdb_stdlog);
4502
4503 return retval;
4504 }
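/* With "set debug target" at level 1, the wrapper above logs each
   memory transfer (address, length, direction and result) followed by
   a hex dump of the bytes moved; the dump is truncated after the first
   output line unless the debug level is 2 or higher.  */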
4505
4506 static void
4507 debug_to_files_info (struct target_ops *target)
4508 {
4509 debug_target.to_files_info (target);
4510
4511 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4512 }
4513
4514 static int
4515 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4516 struct bp_target_info *bp_tgt)
4517 {
4518 int retval;
4519
4520 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4521
4522 fprintf_unfiltered (gdb_stdlog,
4523 "target_insert_breakpoint (%s, xxx) = %ld\n",
4524 core_addr_to_string (bp_tgt->placed_address),
4525 (unsigned long) retval);
4526 return retval;
4527 }
4528
4529 static int
4530 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4531 struct bp_target_info *bp_tgt)
4532 {
4533 int retval;
4534
4535 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4536
4537 fprintf_unfiltered (gdb_stdlog,
4538 "target_remove_breakpoint (%s, xxx) = %ld\n",
4539 core_addr_to_string (bp_tgt->placed_address),
4540 (unsigned long) retval);
4541 return retval;
4542 }
4543
4544 static int
4545 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4546 {
4547 int retval;
4548
4549 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4550
4551 fprintf_unfiltered (gdb_stdlog,
4552 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4553 (unsigned long) type,
4554 (unsigned long) cnt,
4555 (unsigned long) from_tty,
4556 (unsigned long) retval);
4557 return retval;
4558 }
4559
4560 static int
4561 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4562 {
4563 CORE_ADDR retval;
4564
4565 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4566
4567 fprintf_unfiltered (gdb_stdlog,
4568 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4569 core_addr_to_string (addr), (unsigned long) len,
4570 core_addr_to_string (retval));
4571 return retval;
4572 }
4573
4574 static int
4575 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4576 struct expression *cond)
4577 {
4578 int retval;
4579
4580 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4581 rw, cond);
4582
4583 fprintf_unfiltered (gdb_stdlog,
4584 "target_can_accel_watchpoint_condition "
4585 "(%s, %d, %d, %s) = %ld\n",
4586 core_addr_to_string (addr), len, rw,
4587 host_address_to_string (cond), (unsigned long) retval);
4588 return retval;
4589 }
4590
4591 static int
4592 debug_to_stopped_by_watchpoint (void)
4593 {
4594 int retval;
4595
4596 retval = debug_target.to_stopped_by_watchpoint ();
4597
4598 fprintf_unfiltered (gdb_stdlog,
4599 "target_stopped_by_watchpoint () = %ld\n",
4600 (unsigned long) retval);
4601 return retval;
4602 }
4603
4604 static int
4605 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4606 {
4607 int retval;
4608
4609 retval = debug_target.to_stopped_data_address (target, addr);
4610
4611 fprintf_unfiltered (gdb_stdlog,
4612 "target_stopped_data_address ([%s]) = %ld\n",
4613 core_addr_to_string (*addr),
4614 (unsigned long)retval);
4615 return retval;
4616 }
4617
4618 static int
4619 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4620 CORE_ADDR addr,
4621 CORE_ADDR start, int length)
4622 {
4623 int retval;
4624
4625 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4626 start, length);
4627
4628 fprintf_unfiltered (gdb_stdlog,
4629 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4630 core_addr_to_string (addr), core_addr_to_string (start),
4631 length, retval);
4632 return retval;
4633 }
4634
4635 static int
4636 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4637 struct bp_target_info *bp_tgt)
4638 {
4639 int retval;
4640
4641 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4642
4643 fprintf_unfiltered (gdb_stdlog,
4644 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4645 core_addr_to_string (bp_tgt->placed_address),
4646 (unsigned long) retval);
4647 return retval;
4648 }
4649
4650 static int
4651 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4652 struct bp_target_info *bp_tgt)
4653 {
4654 int retval;
4655
4656 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4657
4658 fprintf_unfiltered (gdb_stdlog,
4659 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4660 core_addr_to_string (bp_tgt->placed_address),
4661 (unsigned long) retval);
4662 return retval;
4663 }
4664
4665 static int
4666 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4667 struct expression *cond)
4668 {
4669 int retval;
4670
4671 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4672
4673 fprintf_unfiltered (gdb_stdlog,
4674 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4675 core_addr_to_string (addr), len, type,
4676 host_address_to_string (cond), (unsigned long) retval);
4677 return retval;
4678 }
4679
4680 static int
4681 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4682 struct expression *cond)
4683 {
4684 int retval;
4685
4686 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4687
4688 fprintf_unfiltered (gdb_stdlog,
4689 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4690 core_addr_to_string (addr), len, type,
4691 host_address_to_string (cond), (unsigned long) retval);
4692 return retval;
4693 }
4694
4695 static void
4696 debug_to_terminal_init (void)
4697 {
4698 debug_target.to_terminal_init ();
4699
4700 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4701 }
4702
4703 static void
4704 debug_to_terminal_inferior (void)
4705 {
4706 debug_target.to_terminal_inferior ();
4707
4708 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4709 }
4710
4711 static void
4712 debug_to_terminal_ours_for_output (void)
4713 {
4714 debug_target.to_terminal_ours_for_output ();
4715
4716 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4717 }
4718
4719 static void
4720 debug_to_terminal_ours (void)
4721 {
4722 debug_target.to_terminal_ours ();
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4725 }
4726
4727 static void
4728 debug_to_terminal_save_ours (void)
4729 {
4730 debug_target.to_terminal_save_ours ();
4731
4732 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4733 }
4734
4735 static void
4736 debug_to_terminal_info (const char *arg, int from_tty)
4737 {
4738 debug_target.to_terminal_info (arg, from_tty);
4739
4740 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4741 from_tty);
4742 }
4743
4744 static void
4745 debug_to_load (char *args, int from_tty)
4746 {
4747 debug_target.to_load (args, from_tty);
4748
4749 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4750 }
4751
4752 static void
4753 debug_to_post_startup_inferior (ptid_t ptid)
4754 {
4755 debug_target.to_post_startup_inferior (ptid);
4756
4757 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4758 ptid_get_pid (ptid));
4759 }
4760
4761 static int
4762 debug_to_insert_fork_catchpoint (int pid)
4763 {
4764 int retval;
4765
4766 retval = debug_target.to_insert_fork_catchpoint (pid);
4767
4768 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4769 pid, retval);
4770
4771 return retval;
4772 }
4773
4774 static int
4775 debug_to_remove_fork_catchpoint (int pid)
4776 {
4777 int retval;
4778
4779 retval = debug_target.to_remove_fork_catchpoint (pid);
4780
4781 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4782 pid, retval);
4783
4784 return retval;
4785 }
4786
4787 static int
4788 debug_to_insert_vfork_catchpoint (int pid)
4789 {
4790 int retval;
4791
4792 retval = debug_target.to_insert_vfork_catchpoint (pid);
4793
4794 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4795 pid, retval);
4796
4797 return retval;
4798 }
4799
4800 static int
4801 debug_to_remove_vfork_catchpoint (int pid)
4802 {
4803 int retval;
4804
4805 retval = debug_target.to_remove_vfork_catchpoint (pid);
4806
4807 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4808 pid, retval);
4809
4810 return retval;
4811 }
4812
4813 static int
4814 debug_to_insert_exec_catchpoint (int pid)
4815 {
4816 int retval;
4817
4818 retval = debug_target.to_insert_exec_catchpoint (pid);
4819
4820 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4821 pid, retval);
4822
4823 return retval;
4824 }
4825
4826 static int
4827 debug_to_remove_exec_catchpoint (int pid)
4828 {
4829 int retval;
4830
4831 retval = debug_target.to_remove_exec_catchpoint (pid);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4834 pid, retval);
4835
4836 return retval;
4837 }
4838
4839 static int
4840 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4841 {
4842 int has_exited;
4843
4844 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4847 pid, wait_status, *exit_status, has_exited);
4848
4849 return has_exited;
4850 }
4851
4852 static int
4853 debug_to_can_run (void)
4854 {
4855 int retval;
4856
4857 retval = debug_target.to_can_run ();
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4860
4861 return retval;
4862 }
4863
4864 static struct gdbarch *
4865 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4866 {
4867 struct gdbarch *retval;
4868
4869 retval = debug_target.to_thread_architecture (ops, ptid);
4870
4871 fprintf_unfiltered (gdb_stdlog,
4872 "target_thread_architecture (%s) = %s [%s]\n",
4873 target_pid_to_str (ptid),
4874 host_address_to_string (retval),
4875 gdbarch_bfd_arch_info (retval)->printable_name);
4876 return retval;
4877 }
4878
4879 static void
4880 debug_to_stop (ptid_t ptid)
4881 {
4882 debug_target.to_stop (ptid);
4883
4884 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4885 target_pid_to_str (ptid));
4886 }
4887
4888 static void
4889 debug_to_rcmd (char *command,
4890 struct ui_file *outbuf)
4891 {
4892 debug_target.to_rcmd (command, outbuf);
4893 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4894 }
4895
4896 static char *
4897 debug_to_pid_to_exec_file (int pid)
4898 {
4899 char *exec_file;
4900
4901 exec_file = debug_target.to_pid_to_exec_file (pid);
4902
4903 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4904 pid, exec_file);
4905
4906 return exec_file;
4907 }
4908
4909 static void
4910 setup_target_debug (void)
4911 {
4912 memcpy (&debug_target, &current_target, sizeof debug_target);
4913
4914 current_target.to_open = debug_to_open;
4915 current_target.to_post_attach = debug_to_post_attach;
4916 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4917 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4918 current_target.to_files_info = debug_to_files_info;
4919 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4920 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4921 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4922 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4923 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4924 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4925 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4926 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4927 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4928 current_target.to_watchpoint_addr_within_range
4929 = debug_to_watchpoint_addr_within_range;
4930 current_target.to_region_ok_for_hw_watchpoint
4931 = debug_to_region_ok_for_hw_watchpoint;
4932 current_target.to_can_accel_watchpoint_condition
4933 = debug_to_can_accel_watchpoint_condition;
4934 current_target.to_terminal_init = debug_to_terminal_init;
4935 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4936 current_target.to_terminal_ours_for_output
4937 = debug_to_terminal_ours_for_output;
4938 current_target.to_terminal_ours = debug_to_terminal_ours;
4939 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4940 current_target.to_terminal_info = debug_to_terminal_info;
4941 current_target.to_load = debug_to_load;
4942 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4943 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4944 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4945 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4946 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4947 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4948 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4949 current_target.to_has_exited = debug_to_has_exited;
4950 current_target.to_can_run = debug_to_can_run;
4951 current_target.to_stop = debug_to_stop;
4952 current_target.to_rcmd = debug_to_rcmd;
4953 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4954 current_target.to_thread_architecture = debug_to_thread_architecture;
4955 }
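/* setup_target_debug works by snapshotting the fully merged
   current_target vector into the file-static debug_target and then
   overwriting selected methods of current_target with the debug_to_*
   wrappers above.  Each wrapper simply calls the saved original through
   debug_target and then writes a trace line to gdb_stdlog, so enabling
   "set debug target" changes logging behaviour without altering what
   the target actually does.  */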
4956 \f
4957
4958 static char targ_desc[] =
4959 "Names of targets and files being debugged.\nShows the entire \
4960 stack of targets currently in use (including the exec-file,\n\
4961 core-file, and process, if any), as well as the symbol file name.";
4962
4963 static void
4964 do_monitor_command (char *cmd,
4965 int from_tty)
4966 {
4967 if ((current_target.to_rcmd
4968 == (void (*) (char *, struct ui_file *)) tcomplain)
4969 || (current_target.to_rcmd == debug_to_rcmd
4970 && (debug_target.to_rcmd
4971 == (void (*) (char *, struct ui_file *)) tcomplain)))
4972 error (_("\"monitor\" command not supported by this target."));
4973 target_rcmd (cmd, gdb_stdtarg);
4974 }
4975
4976 /* Print the name of each layer of our target stack.  */
4977
4978 static void
4979 maintenance_print_target_stack (char *cmd, int from_tty)
4980 {
4981 struct target_ops *t;
4982
4983 printf_filtered (_("The current target stack is:\n"));
4984
4985 for (t = target_stack; t != NULL; t = t->beneath)
4986 {
4987 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4988 }
4989 }
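/* Example "maintenance print target-stack" output (the layers shown
   are illustrative and depend on the configuration and session):

     The current target stack is:
      - exec (Local exec file)
      - None (None)
*/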
4990
4991 /* Controls if async mode is permitted. */
4992 int target_async_permitted = 0;
4993
4994 /* The set command writes to this variable. If the inferior is
4995 executing, target_async_permitted is *not* updated. */
4996 static int target_async_permitted_1 = 0;
4997
4998 static void
4999 set_target_async_command (char *args, int from_tty,
5000 struct cmd_list_element *c)
5001 {
5002 if (have_live_inferiors ())
5003 {
5004 target_async_permitted_1 = target_async_permitted;
5005 error (_("Cannot change this setting while the inferior is running."));
5006 }
5007
5008 target_async_permitted = target_async_permitted_1;
5009 }
5010
5011 static void
5012 show_target_async_command (struct ui_file *file, int from_tty,
5013 struct cmd_list_element *c,
5014 const char *value)
5015 {
5016 fprintf_filtered (file,
5017 _("Controlling the inferior in "
5018 "asynchronous mode is %s.\n"), value);
5019 }
5020
5021 /* Temporary copies of permission settings. */
5022
5023 static int may_write_registers_1 = 1;
5024 static int may_write_memory_1 = 1;
5025 static int may_insert_breakpoints_1 = 1;
5026 static int may_insert_tracepoints_1 = 1;
5027 static int may_insert_fast_tracepoints_1 = 1;
5028 static int may_stop_1 = 1;
5029
5030 /* Make the user-set values match the real values again. */
5031
5032 void
5033 update_target_permissions (void)
5034 {
5035 may_write_registers_1 = may_write_registers;
5036 may_write_memory_1 = may_write_memory;
5037 may_insert_breakpoints_1 = may_insert_breakpoints;
5038 may_insert_tracepoints_1 = may_insert_tracepoints;
5039 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5040 may_stop_1 = may_stop;
5041 }
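/* The *_1 variables above implement the usual GDB pattern for settings
   that may not change while the inferior is executing: the "set"
   command writes the shadow copy, and the corresponding set hook either
   propagates the copy to the real flag or, if there is a live process,
   restores the copy from the real value and signals an error.  For
   example, "set may-write-registers off" only takes effect when no
   process is running.  */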
5042
5043 /* This one function handles (most of) the permission flags in the
5044 same way.  */
5045
5046 static void
5047 set_target_permissions (char *args, int from_tty,
5048 struct cmd_list_element *c)
5049 {
5050 if (target_has_execution)
5051 {
5052 update_target_permissions ();
5053 error (_("Cannot change this setting while the inferior is running."));
5054 }
5055
5056 /* Make the real values match the user-changed values. */
5057 may_write_registers = may_write_registers_1;
5058 may_insert_breakpoints = may_insert_breakpoints_1;
5059 may_insert_tracepoints = may_insert_tracepoints_1;
5060 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5061 may_stop = may_stop_1;
5062 update_observer_mode ();
5063 }
5064
5065 /* Set memory write permission independently of observer mode. */
5066
5067 static void
5068 set_write_memory_permission (char *args, int from_tty,
5069 struct cmd_list_element *c)
5070 {
5071 /* Make the real values match the user-changed values. */
5072 may_write_memory = may_write_memory_1;
5073 update_observer_mode ();
5074 }
5075
5076
5077 void
5078 initialize_targets (void)
5079 {
5080 init_dummy_target ();
5081 push_target (&dummy_target);
5082
5083 add_info ("target", target_info, targ_desc);
5084 add_info ("files", target_info, targ_desc);
5085
5086 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5087 Set target debugging."), _("\
5088 Show target debugging."), _("\
5089 When non-zero, target debugging is enabled. Higher numbers are more\n\
5090 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5091 command."),
5092 NULL,
5093 show_targetdebug,
5094 &setdebuglist, &showdebuglist);
5095
5096 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5097 &trust_readonly, _("\
5098 Set mode for reading from readonly sections."), _("\
5099 Show mode for reading from readonly sections."), _("\
5100 When this mode is on, memory reads from readonly sections (such as .text)\n\
5101 will be read from the object file instead of from the target. This will\n\
5102 result in significant performance improvement for remote targets."),
5103 NULL,
5104 show_trust_readonly,
5105 &setlist, &showlist);
5106
5107 add_com ("monitor", class_obscure, do_monitor_command,
5108 _("Send a command to the remote monitor (remote targets only)."));
5109
5110 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5111 _("Print the name of each layer of the internal target stack."),
5112 &maintenanceprintlist);
5113
5114 add_setshow_boolean_cmd ("target-async", no_class,
5115 &target_async_permitted_1, _("\
5116 Set whether gdb controls the inferior in asynchronous mode."), _("\
5117 Show whether gdb controls the inferior in asynchronous mode."), _("\
5118 Tells gdb whether to control the inferior in asynchronous mode."),
5119 set_target_async_command,
5120 show_target_async_command,
5121 &setlist,
5122 &showlist);
5123
5124 add_setshow_boolean_cmd ("may-write-registers", class_support,
5125 &may_write_registers_1, _("\
5126 Set permission to write into registers."), _("\
5127 Show permission to write into registers."), _("\
5128 When this permission is on, GDB may write into the target's registers.\n\
5129 Otherwise, any sort of write attempt will result in an error."),
5130 set_target_permissions, NULL,
5131 &setlist, &showlist);
5132
5133 add_setshow_boolean_cmd ("may-write-memory", class_support,
5134 &may_write_memory_1, _("\
5135 Set permission to write into target memory."), _("\
5136 Show permission to write into target memory."), _("\
5137 When this permission is on, GDB may write into the target's memory.\n\
5138 Otherwise, any sort of write attempt will result in an error."),
5139 set_write_memory_permission, NULL,
5140 &setlist, &showlist);
5141
5142 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5143 &may_insert_breakpoints_1, _("\
5144 Set permission to insert breakpoints in the target."), _("\
5145 Show permission to insert breakpoints in the target."), _("\
5146 When this permission is on, GDB may insert breakpoints in the program.\n\
5147 Otherwise, any sort of insertion attempt will result in an error."),
5148 set_target_permissions, NULL,
5149 &setlist, &showlist);
5150
5151 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5152 &may_insert_tracepoints_1, _("\
5153 Set permission to insert tracepoints in the target."), _("\
5154 Show permission to insert tracepoints in the target."), _("\
5155 When this permission is on, GDB may insert tracepoints in the program.\n\
5156 Otherwise, any sort of insertion attempt will result in an error."),
5157 set_target_permissions, NULL,
5158 &setlist, &showlist);
5159
5160 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5161 &may_insert_fast_tracepoints_1, _("\
5162 Set permission to insert fast tracepoints in the target."), _("\
5163 Show permission to insert fast tracepoints in the target."), _("\
5164 When this permission is on, GDB may insert fast tracepoints.\n\
5165 Otherwise, any sort of insertion attempt will result in an error."),
5166 set_target_permissions, NULL,
5167 &setlist, &showlist);
5168
5169 add_setshow_boolean_cmd ("may-interrupt", class_support,
5170 &may_stop_1, _("\
5171 Set permission to interrupt or signal the target."), _("\
5172 Show permission to interrupt or signal the target."), _("\
5173 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5174 Otherwise, any attempt to interrupt or stop will be ignored."),
5175 set_target_permissions, NULL,
5176 &setlist, &showlist);
5177 }