convert to_pid_to_exec_file
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (struct target_ops *self);
136
137 static void debug_to_terminal_inferior (struct target_ops *self);
138
139 static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141 static void debug_to_terminal_save_ours (struct target_ops *self);
142
143 static void debug_to_terminal_ours (struct target_ops *self);
144
145 static void debug_to_load (struct target_ops *self, char *, int);
146
147 static int debug_to_can_run (struct target_ops *self);
148
149 static void debug_to_stop (struct target_ops *self, ptid_t);
150
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183 memory breakpoint inserted by gdb. */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertant disruption, such as in non-stop mode. */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Non-zero if we want to see trace of target level stuff. */
204
205 static unsigned int targetdebug = 0;
/* Callback for "show debug target"; reports the current value of the
   "debug target" setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
212
213 static void setup_target_debug (void);
214
215 /* The user just typed 'target' without the name of a target. */
216
static void
target_command (char *arg, int from_tty)
{
  /* ARG and FROM_TTY are unused; bare "target" only reminds the user
     that a target name is required.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
/* Like target_has_execution_1, but for the current inferior,
   inferior_ptid.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
343
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
346
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to answering "no".  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill in any still-NULL methods with delegators (see
     target-delegates.c) that forward to the target beneath.  */
  install_delegators (t);
}
371
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
375
376 void
377 add_target_with_completer (struct target_ops *t,
378 completer_ftype *completer)
379 {
380 struct cmd_list_element *c;
381
382 complete_target_initialization (t);
383
384 if (!target_structs)
385 {
386 target_struct_allocsize = DEFAULT_ALLOCSIZE;
387 target_structs = (struct target_ops **) xmalloc
388 (target_struct_allocsize * sizeof (*target_structs));
389 }
390 if (target_struct_size >= target_struct_allocsize)
391 {
392 target_struct_allocsize *= 2;
393 target_structs = (struct target_ops **)
394 xrealloc ((char *) target_structs,
395 target_struct_allocsize * sizeof (*target_structs));
396 }
397 target_structs[target_struct_size++] = t;
398
399 if (targetlist == NULL)
400 add_prefix_cmd ("target", class_run, target_command, _("\
401 Connect to a target machine or process.\n\
402 The first argument is the type or protocol of the target machine.\n\
403 Remaining arguments are interpreted by the target protocol. For more\n\
404 information on the arguments for a particular protocol, type\n\
405 `help target ' followed by the protocol name."),
406 &targetlist, "target ", 0, &cmdlist);
407 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
408 &targetlist);
409 if (completer != NULL)
410 set_cmd_completer (c, completer);
411 }
412
413 /* Add a possible target architecture to the list. */
414
void
add_target (struct target_ops *t)
{
  /* Same as add_target_with_completer, with no command completer.  */
  add_target_with_completer (t, NULL);
}
420
421 /* See target.h. */
422
423 void
424 add_deprecated_target_alias (struct target_ops *t, char *alias)
425 {
426 struct cmd_list_element *c;
427 char *alt;
428
429 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
430 see PR cli/15104. */
431 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
432 alt = xstrprintf ("target %s", t->to_shortname);
433 deprecate_cmd (c, alt);
434 }
435
436 /* Stub functions */
437
/* Deliberately do nothing; used as a default implementation for
   target methods whose calls may be safely ignored.  */

void
target_ignore (void)
{
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
/* Load a program into the current target.  ARG and FROM_TTY are
   passed through to the target's to_load method.  */

void
target_load (char *arg, int from_tty)
{
  /* Loading overwrites target memory, so drop cached data first.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
467
468 void
469 target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471 {
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489 }
490
491 void
492 target_terminal_inferior (void)
493 {
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution)
499 return;
500
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target.to_terminal_inferior) (&current_target);
504 }
505
/* Default memory-transfer stub for targets with no memory access:
   refuses every request.  MEMADDR, MYADDR, LEN and WRITE describe the
   attempted transfer; T is the target asked.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
513
/* Report that the currently selected target does not support the
   attempted operation.  Throws an error, so it does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
/* Report that the operation requires a process to debug and none is
   available.  Throws an error, so it does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
526
/* Default implementation of to_terminal_info: there is no saved
   terminal state to describe.  ARGS and FROM_TTY are ignored.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
533 /* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the pid of inferior_ptid, combining it with the given LWP
     and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops *self)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack has already supplied it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is deliberately left unset rather than
     defaulted; a NULL value is meaningful here.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
869
870 /* Push a new target type into the stack of the existing target accessors,
871 possibly superseding some of the existing accessors.
872
873 Rather than allow an empty stack, we always have the dummy target at
874 the bottom stratum, so we can call the function vectors without
875 checking them. */
876
877 void
878 push_target (struct target_ops *t)
879 {
880 struct target_ops **cur;
881
882 /* Check magic number. If wrong, it probably means someone changed
883 the struct definition, but not all the places that initialize one. */
884 if (t->to_magic != OPS_MAGIC)
885 {
886 fprintf_unfiltered (gdb_stderr,
887 "Magic number of %s target struct wrong\n",
888 t->to_shortname);
889 internal_error (__FILE__, __LINE__,
890 _("failed internal consistency check"));
891 }
892
893 /* Find the proper stratum to install this target in. */
894 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
895 {
896 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
897 break;
898 }
899
900 /* If there's already targets at this stratum, remove them. */
901 /* FIXME: cagney/2003-10-15: I think this should be popping all
902 targets to CUR, and not just those at this stratum level. */
903 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
904 {
905 /* There's already something at this stratum level. Close it,
906 and un-hook it from the stack. */
907 struct target_ops *tmp = (*cur);
908
909 (*cur) = (*cur)->beneath;
910 tmp->beneath = NULL;
911 target_close (tmp);
912 }
913
914 /* We have removed all targets in our stratum, now add the new one. */
915 t->beneath = (*cur);
916 (*cur) = t;
917
918 update_current_target ();
919 }
920
921 /* Remove a target_ops vector from the stack, wherever it may be.
922 Return how many times it was removed (0 or 1). */
923
924 int
925 unpush_target (struct target_ops *t)
926 {
927 struct target_ops **cur;
928 struct target_ops *tmp;
929
930 if (t->to_stratum == dummy_stratum)
931 internal_error (__FILE__, __LINE__,
932 _("Attempt to unpush the dummy target"));
933
934 /* Look for the specified target. Note that we assume that a target
935 can only occur once in the target stack. */
936
937 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
938 {
939 if ((*cur) == t)
940 break;
941 }
942
943 /* If we don't find target_ops, quit. Only open targets should be
944 closed. */
945 if ((*cur) == NULL)
946 return 0;
947
948 /* Unchain the target. */
949 tmp = (*cur);
950 (*cur) = (*cur)->beneath;
951 tmp->beneath = NULL;
952
953 update_current_target ();
954
955 /* Finally close the target. Note we do this after unchaining, so
956 any target method calls from within the target_close
957 implementation don't end up in T anymore. */
958 target_close (t);
959
960 return 1;
961 }
962
963 void
964 pop_all_targets_above (enum strata above_stratum)
965 {
966 while ((int) (current_target.to_stratum) > (int) above_stratum)
967 {
968 if (!unpush_target (target_stack))
969 {
970 fprintf_unfiltered (gdb_stderr,
971 "pop_all_targets couldn't find target %s\n",
972 target_stack->to_shortname);
973 internal_error (__FILE__, __LINE__,
974 _("failed internal consistency check"));
975 break;
976 }
977 }
978 }
979
/* Unpush every target above the dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
985
986 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
987
988 int
989 target_is_pushed (struct target_ops *t)
990 {
991 struct target_ops **cur;
992
993 /* Check magic number. If wrong, it probably means someone changed
994 the struct definition, but not all the places that initialize one. */
995 if (t->to_magic != OPS_MAGIC)
996 {
997 fprintf_unfiltered (gdb_stderr,
998 "Magic number of %s target struct wrong\n",
999 t->to_shortname);
1000 internal_error (__FILE__, __LINE__,
1001 _("failed internal consistency check"));
1002 }
1003
1004 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1005 if (*cur == t)
1006 return 1;
1007
1008 return 0;
1009 }
1010
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   The lookup is delegated to the first target on the stack that
   implements to_get_thread_local_address, and additionally requires
   the architecture to provide fetch_tls_load_module_address.  TLS
   errors raised while computing the address are translated into
   user-readable error() messages here; any other exception is
   re-thrown to an outer handler.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH and read after a possible
     longjmp.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can resolve thread-local
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* Shared libraries and executables get differently worded
	     messages for the same underlying error.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific failure; propagate it unchanged.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1107
1108 const char *
1109 target_xfer_status_to_string (enum target_xfer_status err)
1110 {
1111 #define CASE(X) case X: return #X
1112 switch (err)
1113 {
1114 CASE(TARGET_XFER_E_IO);
1115 CASE(TARGET_XFER_E_UNAVAILABLE);
1116 default:
1117 return "<unknown>";
1118 }
1119 #undef CASE
1120 };
1121
1122
1123 #undef MIN
1124 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1125
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read (the count includes the terminating NUL when one was found).

   Memory is fetched in 4-byte chunks from 4-aligned addresses; if an
   aligned chunk read fails (e.g. it crosses into unmapped memory), a
   single-byte retry at MEMADDR is attempted before giving up.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;			/* Accumulates bytes read so far.  */
  int buffer_allocated;		/* Capacity of BUFFER; doubled on demand.  */
  char *bufptr;			/* Next free slot within BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Bytes remaining in the current aligned 4-byte chunk.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow BUFFER (by doubling) if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy the chunk into BUFFER, stopping at a NUL terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;	/* Include the NUL in the count.  */
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is handed back even on error, so the caller always owns
     (and must free) *STRING.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1198
1199 struct target_section_table *
1200 target_get_section_table (struct target_ops *target)
1201 {
1202 struct target_ops *t;
1203
1204 if (targetdebug)
1205 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1206
1207 for (t = target; t != NULL; t = t->beneath)
1208 if (t->to_get_section_table != NULL)
1209 return (*t->to_get_section_table) (t);
1210
1211 return NULL;
1212 }
1213
1214 /* Find a section containing ADDR. */
1215
1216 struct target_section *
1217 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1218 {
1219 struct target_section_table *table = target_get_section_table (target);
1220 struct target_section *secp;
1221
1222 if (table == NULL)
1223 return NULL;
1224
1225 for (secp = table->sections; secp < table->sections_end; secp++)
1226 {
1227 if (addr >= secp->addr && addr < secp->endaddr)
1228 return secp;
1229 }
1230 return NULL;
1231 }
1232
1233 /* Read memory from the live target, even if currently inspecting a
1234 traceframe. The return is the same as that of target_read. */
1235
1236 static enum target_xfer_status
1237 target_read_live_memory (enum target_object object,
1238 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1239 ULONGEST *xfered_len)
1240 {
1241 enum target_xfer_status ret;
1242 struct cleanup *cleanup;
1243
1244 /* Switch momentarily out of tfind mode so to access live memory.
1245 Note that this must not clear global state, such as the frame
1246 cache, which must still remain valid for the previous traceframe.
1247 We may be _building_ the frame cache at this point. */
1248 cleanup = make_cleanup_restore_traceframe_number ();
1249 set_traceframe_number (-1);
1250
1251 ret = target_xfer_partial (current_target.beneath, object, NULL,
1252 myaddr, NULL, memaddr, len, xfered_len);
1253
1254 do_cleanups (cleanup);
1255 return ret;
1256 }
1257
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   Used when inspecting a traceframe: a SEC_READONLY section cannot
   have changed since the trace was collected, so the read may safely
   be satisfied from the live target.  A request that extends past the
   containing section is truncated at the section boundary.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR lies in a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not in a known read-only section; nothing was transferred.  */
  return TARGET_XFER_EOF;
}
1313
1314 /* Read memory from more than one valid target. A core file, for
1315 instance, could have some of memory but delegate other bits to
1316 the target below it. So, we must manually try all targets. */
1317
1318 static enum target_xfer_status
1319 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1320 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1321 ULONGEST *xfered_len)
1322 {
1323 enum target_xfer_status res;
1324
1325 do
1326 {
1327 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1328 readbuf, writebuf, memaddr, len,
1329 xfered_len);
1330 if (res == TARGET_XFER_OK)
1331 break;
1332
1333 /* Stop if the target reports that the memory is not available. */
1334 if (res == TARGET_XFER_E_UNAVAILABLE)
1335 break;
1336
1337 /* We want to continue past core files to executables, but not
1338 past a running target's memory. */
1339 if (ops->to_has_all_memory (ops))
1340 break;
1341
1342 ops = ops->beneath;
1343 }
1344 while (ops != NULL);
1345
1346 return res;
1347 }
1348
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The request is satisfied by trying, in order: unmapped overlay
   sections; read-only executable sections (when
   "trust-readonly-sections" is on); live read-only memory while
   inspecting a traceframe; GDB's dcache; and finally the raw target
   stack.  Memory-region attributes may reject or truncate the
   request before it ever reaches the target.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): REG_LEN is an int while LEN is ULONGEST, so a
     request larger than INT_MAX would be truncated here -- confirm
     callers cap LEN.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* Does the traceframe's available memory start exactly at
	     MEMADDR?  If not, fall back to live read-only memory.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clip the request
     at the region's upper boundary.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1559
1560 /* Perform a partial memory transfer. For docs see target.h,
1561 to_xfer_partial. */
1562
1563 static enum target_xfer_status
1564 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1565 gdb_byte *readbuf, const gdb_byte *writebuf,
1566 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1567 {
1568 enum target_xfer_status res;
1569
1570 /* Zero length requests are ok and require no work. */
1571 if (len == 0)
1572 return TARGET_XFER_EOF;
1573
1574 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1575 breakpoint insns, thus hiding out from higher layers whether
1576 there are software breakpoints inserted in the code stream. */
1577 if (readbuf != NULL)
1578 {
1579 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1580 xfered_len);
1581
1582 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1583 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1584 }
1585 else
1586 {
1587 void *buf;
1588 struct cleanup *old_chain;
1589
1590 /* A large write request is likely to be partially satisfied
1591 by memory_xfer_partial_1. We will continually malloc
1592 and free a copy of the entire write request for breakpoint
1593 shadow handling even though we only end up writing a small
1594 subset of it. Cap writes to 4KB to mitigate this. */
1595 len = min (4096, len);
1596
1597 buf = xmalloc (len);
1598 old_chain = make_cleanup (xfree, buf);
1599 memcpy (buf, writebuf, len);
1600
1601 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1602 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1603 xfered_len);
1604
1605 do_cleanups (old_chain);
1606 }
1607
1608 return res;
1609 }
1610
1611 static void
1612 restore_show_memory_breakpoints (void *arg)
1613 {
1614 show_memory_breakpoints = (uintptr_t) arg;
1615 }
1616
1617 struct cleanup *
1618 make_show_memory_breakpoints_cleanup (int show)
1619 {
1620 int current = show_memory_breakpoints;
1621
1622 show_memory_breakpoints = show;
1623 return make_cleanup (restore_show_memory_breakpoints,
1624 (void *) (uintptr_t) current);
1625 }
1626
/* For docs see target.h, to_xfer_partial.

   Top-level dispatch for partial transfers: memory-flavored objects
   are routed through the memory-specific paths; everything else goes
   straight to OPS's to_xfer_partial.  Also handles the "may-write"
   setting and target debug logging.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honour the user's "set may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  /* Dump the transferred bytes; a full dump only when
	     targetdebug >= 2, otherwise stop at the second 16-byte
	     boundary.  NOTE(review): I is a signed int compared
	     against the unsigned *XFERED_LEN; would misbehave for
	     transfers over INT_MAX bytes.  */
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1719
1720 /* Read LEN bytes of target memory at address MEMADDR, placing the
1721 results in GDB's memory at MYADDR. Returns either 0 for success or
1722 TARGET_XFER_E_IO if any error occurs.
1723
1724 If an error occurs, no guarantee is made about the contents of the data at
1725 MYADDR. In particular, the caller should not depend upon partial reads
1726 filling the buffer with good data. There is no way for the caller to know
1727 how much good data might have been transfered anyway. Callers that can
1728 deal with partial reads should call target_read (which will retry until
1729 it makes no progress, and then return how much was transferred). */
1730
1731 int
1732 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1733 {
1734 /* Dispatch to the topmost target, not the flattened current_target.
1735 Memory accesses check target->to_has_(all_)memory, and the
1736 flattened target doesn't inherit those. */
1737 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1738 myaddr, memaddr, len) == len)
1739 return 0;
1740 else
1741 return TARGET_XFER_E_IO;
1742 }
1743
1744 /* Like target_read_memory, but specify explicitly that this is a read
1745 from the target's raw memory. That is, this read bypasses the
1746 dcache, breakpoint shadowing, etc. */
1747
1748 int
1749 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1750 {
1751 /* See comment in target_read_memory about why the request starts at
1752 current_target.beneath. */
1753 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1754 myaddr, memaddr, len) == len)
1755 return 0;
1756 else
1757 return TARGET_XFER_E_IO;
1758 }
1759
1760 /* Like target_read_memory, but specify explicitly that this is a read from
1761 the target's stack. This may trigger different cache behavior. */
1762
1763 int
1764 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1765 {
1766 /* See comment in target_read_memory about why the request starts at
1767 current_target.beneath. */
1768 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1769 myaddr, memaddr, len) == len)
1770 return 0;
1771 else
1772 return TARGET_XFER_E_IO;
1773 }
1774
1775 /* Like target_read_memory, but specify explicitly that this is a read from
1776 the target's code. This may trigger different cache behavior. */
1777
1778 int
1779 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1780 {
1781 /* See comment in target_read_memory about why the request starts at
1782 current_target.beneath. */
1783 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1784 myaddr, memaddr, len) == len)
1785 return 0;
1786 else
1787 return TARGET_XFER_E_IO;
1788 }
1789
1790 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1791 Returns either 0 for success or TARGET_XFER_E_IO if any
1792 error occurs. If an error occurs, no guarantee is made about how
1793 much data got written. Callers that can deal with partial writes
1794 should call target_write. */
1795
1796 int
1797 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1798 {
1799 /* See comment in target_read_memory about why the request starts at
1800 current_target.beneath. */
1801 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1802 myaddr, memaddr, len) == len)
1803 return 0;
1804 else
1805 return TARGET_XFER_E_IO;
1806 }
1807
1808 /* Write LEN bytes from MYADDR to target raw memory at address
1809 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1810 if any error occurs. If an error occurs, no guarantee is made
1811 about how much data got written. Callers that can deal with
1812 partial writes should call target_write. */
1813
1814 int
1815 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1816 {
1817 /* See comment in target_read_memory about why the request starts at
1818 current_target.beneath. */
1819 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1820 myaddr, memaddr, len) == len)
1821 return 0;
1822 else
1823 return TARGET_XFER_E_IO;
1824 }
1825
1826 /* Fetch the target's memory map. */
1827
1828 VEC(mem_region_s) *
1829 target_memory_map (void)
1830 {
1831 VEC(mem_region_s) *result;
1832 struct mem_region *last_one, *this_one;
1833 int ix;
1834 struct target_ops *t;
1835
1836 if (targetdebug)
1837 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1838
1839 for (t = current_target.beneath; t != NULL; t = t->beneath)
1840 if (t->to_memory_map != NULL)
1841 break;
1842
1843 if (t == NULL)
1844 return NULL;
1845
1846 result = t->to_memory_map (t);
1847 if (result == NULL)
1848 return NULL;
1849
1850 qsort (VEC_address (mem_region_s, result),
1851 VEC_length (mem_region_s, result),
1852 sizeof (struct mem_region), mem_region_cmp);
1853
1854 /* Check that regions do not overlap. Simultaneously assign
1855 a numbering for the "mem" commands to use to refer to
1856 each region. */
1857 last_one = NULL;
1858 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1859 {
1860 this_one->number = ix;
1861
1862 if (last_one && last_one->hi > this_one->lo)
1863 {
1864 warning (_("Overlapping regions in memory map: ignoring"));
1865 VEC_free (mem_region_s, result);
1866 return NULL;
1867 }
1868 last_one = this_one;
1869 }
1870
1871 return result;
1872 }
1873
1874 void
1875 target_flash_erase (ULONGEST address, LONGEST length)
1876 {
1877 struct target_ops *t;
1878
1879 for (t = current_target.beneath; t != NULL; t = t->beneath)
1880 if (t->to_flash_erase != NULL)
1881 {
1882 if (targetdebug)
1883 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1884 hex_string (address), phex (length, 0));
1885 t->to_flash_erase (t, address, length);
1886 return;
1887 }
1888
1889 tcomplain ();
1890 }
1891
1892 void
1893 target_flash_done (void)
1894 {
1895 struct target_ops *t;
1896
1897 for (t = current_target.beneath; t != NULL; t = t->beneath)
1898 if (t->to_flash_done != NULL)
1899 {
1900 if (targetdebug)
1901 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1902 t->to_flash_done (t);
1903 return;
1904 }
1905
1906 tcomplain ();
1907 }
1908
/* "show trust-readonly-sections" command callback.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1917
1918 /* More generic transfers. */
1919
1920 static enum target_xfer_status
1921 default_xfer_partial (struct target_ops *ops, enum target_object object,
1922 const char *annex, gdb_byte *readbuf,
1923 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1924 ULONGEST *xfered_len)
1925 {
1926 if (object == TARGET_OBJECT_MEMORY
1927 && ops->deprecated_xfer_memory != NULL)
1928 /* If available, fall back to the target's
1929 "deprecated_xfer_memory" method. */
1930 {
1931 int xfered = -1;
1932
1933 errno = 0;
1934 if (writebuf != NULL)
1935 {
1936 void *buffer = xmalloc (len);
1937 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1938
1939 memcpy (buffer, writebuf, len);
1940 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1941 1/*write*/, NULL, ops);
1942 do_cleanups (cleanup);
1943 }
1944 if (readbuf != NULL)
1945 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1946 0/*read*/, NULL, ops);
1947 if (xfered > 0)
1948 {
1949 *xfered_len = (ULONGEST) xfered;
1950 return TARGET_XFER_E_IO;
1951 }
1952 else if (xfered == 0 && errno == 0)
1953 /* "deprecated_xfer_memory" uses 0, cross checked against
1954 ERRNO as one indication of an error. */
1955 return TARGET_XFER_EOF;
1956 else
1957 return TARGET_XFER_E_IO;
1958 }
1959 else
1960 {
1961 gdb_assert (ops->beneath != NULL);
1962 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1963 readbuf, writebuf, offset, len,
1964 xfered_len);
1965 }
1966 }
1967
1968 /* Target vector read/write partial wrapper functions. */
1969
1970 static enum target_xfer_status
1971 target_read_partial (struct target_ops *ops,
1972 enum target_object object,
1973 const char *annex, gdb_byte *buf,
1974 ULONGEST offset, ULONGEST len,
1975 ULONGEST *xfered_len)
1976 {
1977 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1978 xfered_len);
1979 }
1980
1981 static enum target_xfer_status
1982 target_write_partial (struct target_ops *ops,
1983 enum target_object object,
1984 const char *annex, const gdb_byte *buf,
1985 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1986 {
1987 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1988 xfered_len);
1989 }
1990
1991 /* Wrappers to perform the full transfer. */
1992
1993 /* For docs on target_read see target.h. */
1994
1995 LONGEST
1996 target_read (struct target_ops *ops,
1997 enum target_object object,
1998 const char *annex, gdb_byte *buf,
1999 ULONGEST offset, LONGEST len)
2000 {
2001 LONGEST xfered = 0;
2002
2003 while (xfered < len)
2004 {
2005 ULONGEST xfered_len;
2006 enum target_xfer_status status;
2007
2008 status = target_read_partial (ops, object, annex,
2009 (gdb_byte *) buf + xfered,
2010 offset + xfered, len - xfered,
2011 &xfered_len);
2012
2013 /* Call an observer, notifying them of the xfer progress? */
2014 if (status == TARGET_XFER_EOF)
2015 return xfered;
2016 else if (status == TARGET_XFER_OK)
2017 {
2018 xfered += xfered_len;
2019 QUIT;
2020 }
2021 else
2022 return -1;
2023
2024 }
2025 return len;
2026 }
2027
2028 /* Assuming that the entire [begin, end) range of memory cannot be
2029 read, try to read whatever subrange is possible to read.
2030
2031 The function returns, in RESULT, either zero or one memory block.
2032 If there's a readable subrange at the beginning, it is completely
2033 read and returned. Any further readable subrange will not be read.
2034 Otherwise, if there's a readable subrange at the end, it will be
2035 completely read and returned. Any readable subranges before it
2036 (obviously, not starting at the beginning), will be ignored. In
2037 other cases -- either no readable subrange, or readable subrange(s)
2038 that is neither at the beginning, or end, nothing is returned.
2039
2040 The purpose of this function is to handle a read across a boundary
2041 of accessible memory in a case when memory map is not available.
2042 The above restrictions are fine for this case, but will give
2043 incorrect results if the memory is 'patchy'. However, supporting
2044 'patchy' memory would require trying to read every single byte,
2045 and it seems unacceptable solution. Explicit memory map is
2046 recommended for this case -- and target_read_memory_robust will
2047 take care of reading multiple ranges then. */
2048
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  The direction of the binary
     search below depends on which end was readable.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither endpoint is readable; per the contract, return no
	 blocks at all.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* The "first half" is always the half adjacent to the already
	 readable end; the "second half" is the one we recurse into when
	 the first half reads cleanly.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  Ownership of
	 BUF transfers into the result block.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy the readable
	 tail into a right-sized buffer and release BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2162
2163 void
2164 free_memory_read_result_vector (void *x)
2165 {
2166 VEC(memory_read_result_s) *v = x;
2167 memory_read_result_s *current;
2168 int ix;
2169
2170 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2171 {
2172 xfree (current->data);
2173 }
2174 VEC_free (memory_read_result_s, v);
2175 }
2176
/* Read LEN bytes of target memory at OFFSET, region by region, and
   return a vector of the blocks that could actually be read.
   Unreadable stretches produce no block; partially readable chunks are
   salvaged via read_whatever_is_readable.  Caller owns the returned
   vector and each block's data (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
      /* NOTE(review): RLEN is computed from OFFSET rather than the
	 current address (OFFSET + XFERED), so once XFERED > 0 it
	 overstates the bytes remaining in this region — looks like it
	 should be region->hi - (offset + xfered); confirm against
	 mem_region semantics before changing.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or short read succeeded; record the block.  BUFFER
		 ownership moves into the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2235
2236
2237 /* An alternative to target_write with progress callbacks. */
2238
2239 LONGEST
2240 target_write_with_progress (struct target_ops *ops,
2241 enum target_object object,
2242 const char *annex, const gdb_byte *buf,
2243 ULONGEST offset, LONGEST len,
2244 void (*progress) (ULONGEST, void *), void *baton)
2245 {
2246 LONGEST xfered = 0;
2247
2248 /* Give the progress callback a chance to set up. */
2249 if (progress)
2250 (*progress) (0, baton);
2251
2252 while (xfered < len)
2253 {
2254 ULONGEST xfered_len;
2255 enum target_xfer_status status;
2256
2257 status = target_write_partial (ops, object, annex,
2258 (gdb_byte *) buf + xfered,
2259 offset + xfered, len - xfered,
2260 &xfered_len);
2261
2262 if (status == TARGET_XFER_EOF)
2263 return xfered;
2264 if (TARGET_XFER_STATUS_ERROR_P (status))
2265 return -1;
2266
2267 gdb_assert (status == TARGET_XFER_OK);
2268 if (progress)
2269 (*progress) (xfered_len, baton);
2270
2271 xfered += xfered_len;
2272 QUIT;
2273 }
2274 return len;
2275 }
2276
2277 /* For docs on target_write see target.h. */
2278
2279 LONGEST
2280 target_write (struct target_ops *ops,
2281 enum target_object object,
2282 const char *annex, const gdb_byte *buf,
2283 ULONGEST offset, LONGEST len)
2284 {
2285 return target_write_with_progress (ops, object, annex, buf, offset, len,
2286 NULL, NULL);
2287 }
2288
2289 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2290 the size of the transferred data. PADDING additional bytes are
2291 available in *BUF_P. This is a helper function for
2292 target_read_alloc; see the declaration of that function for more
2293 information. */
2294
2295 static LONGEST
2296 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2297 const char *annex, gdb_byte **buf_p, int padding)
2298 {
2299 size_t buf_alloc, buf_pos;
2300 gdb_byte *buf;
2301
2302 /* This function does not have a length parameter; it reads the
2303 entire OBJECT). Also, it doesn't support objects fetched partly
2304 from one target and partly from another (in a different stratum,
2305 e.g. a core file and an executable). Both reasons make it
2306 unsuitable for reading memory. */
2307 gdb_assert (object != TARGET_OBJECT_MEMORY);
2308
2309 /* Start by reading up to 4K at a time. The target will throttle
2310 this number down if necessary. */
2311 buf_alloc = 4096;
2312 buf = xmalloc (buf_alloc);
2313 buf_pos = 0;
2314 while (1)
2315 {
2316 ULONGEST xfered_len;
2317 enum target_xfer_status status;
2318
2319 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2320 buf_pos, buf_alloc - buf_pos - padding,
2321 &xfered_len);
2322
2323 if (status == TARGET_XFER_EOF)
2324 {
2325 /* Read all there was. */
2326 if (buf_pos == 0)
2327 xfree (buf);
2328 else
2329 *buf_p = buf;
2330 return buf_pos;
2331 }
2332 else if (status != TARGET_XFER_OK)
2333 {
2334 /* An error occurred. */
2335 xfree (buf);
2336 return TARGET_XFER_E_IO;
2337 }
2338
2339 buf_pos += xfered_len;
2340
2341 /* If the buffer is filling up, expand it. */
2342 if (buf_alloc < buf_pos * 2)
2343 {
2344 buf_alloc *= 2;
2345 buf = xrealloc (buf, buf_alloc);
2346 }
2347
2348 QUIT;
2349 }
2350 }
2351
2352 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2353 the size of the transferred data. See the declaration in "target.h"
2354 function for more information about the return value. */
2355
2356 LONGEST
2357 target_read_alloc (struct target_ops *ops, enum target_object object,
2358 const char *annex, gdb_byte **buf_p)
2359 {
2360 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2361 }
2362
2363 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2364 returned as a string, allocated using xmalloc. If an error occurs
2365 or the transfer is unsupported, NULL is returned. Empty objects
2366 are returned as allocated but empty strings. A warning is issued
2367 if the result contains any embedded NUL bytes. */
2368
2369 char *
2370 target_read_stralloc (struct target_ops *ops, enum target_object object,
2371 const char *annex)
2372 {
2373 gdb_byte *buffer;
2374 char *bufstr;
2375 LONGEST i, transferred;
2376
2377 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2378 bufstr = (char *) buffer;
2379
2380 if (transferred < 0)
2381 return NULL;
2382
2383 if (transferred == 0)
2384 return xstrdup ("");
2385
2386 bufstr[transferred] = 0;
2387
2388 /* Check for embedded NUL bytes; but allow trailing NULs. */
2389 for (i = strlen (bufstr); i < transferred; i++)
2390 if (bufstr[i] != 0)
2391 {
2392 warning (_("target object %d, annex %s, "
2393 "contained unexpected null characters"),
2394 (int) object, annex ? annex : "(none)");
2395 break;
2396 }
2397
2398 return bufstr;
2399 }
2400
2401 /* Memory transfer methods. */
2402
2403 void
2404 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2405 LONGEST len)
2406 {
2407 /* This method is used to read from an alternate, non-current
2408 target. This read must bypass the overlay support (as symbols
2409 don't match this target), and GDB's internal cache (wrong cache
2410 for this target). */
2411 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2412 != len)
2413 memory_error (TARGET_XFER_E_IO, addr);
2414 }
2415
2416 ULONGEST
2417 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2418 int len, enum bfd_endian byte_order)
2419 {
2420 gdb_byte buf[sizeof (ULONGEST)];
2421
2422 gdb_assert (len <= sizeof (buf));
2423 get_target_memory (ops, addr, buf, len);
2424 return extract_unsigned_integer (buf, len, byte_order);
2425 }
2426
2427 /* See target.h. */
2428
2429 int
2430 target_insert_breakpoint (struct gdbarch *gdbarch,
2431 struct bp_target_info *bp_tgt)
2432 {
2433 if (!may_insert_breakpoints)
2434 {
2435 warning (_("May not insert breakpoints"));
2436 return 1;
2437 }
2438
2439 return current_target.to_insert_breakpoint (&current_target,
2440 gdbarch, bp_tgt);
2441 }
2442
2443 /* See target.h. */
2444
2445 int
2446 target_remove_breakpoint (struct gdbarch *gdbarch,
2447 struct bp_target_info *bp_tgt)
2448 {
2449 /* This is kind of a weird case to handle, but the permission might
2450 have been changed after breakpoints were inserted - in which case
2451 we should just take the user literally and assume that any
2452 breakpoints should be left in place. */
2453 if (!may_insert_breakpoints)
2454 {
2455 warning (_("May not remove breakpoints"));
2456 return 1;
2457 }
2458
2459 return current_target.to_remove_breakpoint (&current_target,
2460 gdbarch, bp_tgt);
2461 }
2462
2463 static void
2464 target_info (char *args, int from_tty)
2465 {
2466 struct target_ops *t;
2467 int has_all_mem = 0;
2468
2469 if (symfile_objfile != NULL)
2470 printf_unfiltered (_("Symbols from \"%s\".\n"),
2471 objfile_name (symfile_objfile));
2472
2473 for (t = target_stack; t != NULL; t = t->beneath)
2474 {
2475 if (!(*t->to_has_memory) (t))
2476 continue;
2477
2478 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2479 continue;
2480 if (has_all_mem)
2481 printf_unfiltered (_("\tWhile running this, "
2482 "GDB does not access memory from...\n"));
2483 printf_unfiltered ("%s:\n", t->to_longname);
2484 (t->to_files_info) (t);
2485 has_all_mem = (*t->to_has_all_memory) (t);
2486 }
2487 }
2488
2489 /* This function is called before any new inferior is created, e.g.
2490 by running a program, attaching, or connecting to a target.
2491 It cleans up any state from previous invocations which might
2492 change between runs. This is a subset of what target_preopen
2493 resets (things which might change between targets). */
2494
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of this state is
     per-inferior, so it must be preserved.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Agent capabilities must be re-probed against the new inferior.  */
  agent_capability_invalidate ();
}
2529
2530 /* Callback for iterate_over_inferiors. Gets rid of the given
2531 inferior. */
2532
2533 static int
2534 dispose_inferior (struct inferior *inf, void *args)
2535 {
2536 struct thread_info *thread;
2537
2538 thread = any_thread_of_process (inf->pid);
2539 if (thread)
2540 {
2541 switch_to_thread (thread->ptid);
2542
2543 /* Core inferiors actually should be detached, not killed. */
2544 if (target_has_execution)
2545 target_kill ();
2546 else
2547 target_detach (NULL, 0);
2548 }
2549
2550 return 0;
2551 }
2552
2553 /* This is to be called by the open routine before it does
2554 anything. */
2555
2556 void
2557 target_preopen (int from_tty)
2558 {
2559 dont_repeat ();
2560
2561 if (have_inferiors ())
2562 {
2563 if (!from_tty
2564 || !have_live_inferiors ()
2565 || query (_("A program is being debugged already. Kill it? ")))
2566 iterate_over_inferiors (dispose_inferior, NULL);
2567 else
2568 error (_("Program not killed."));
2569 }
2570
2571 /* Calling target_kill may remove the target from the stack. But if
2572 it doesn't (which seems like a win for UDI), remove it now. */
2573 /* Leave the exec target, though. The user may be switching from a
2574 live process to a core of the same program. */
2575 pop_all_targets_above (file_stratum);
2576
2577 target_pre_inferior (from_tty);
2578 }
2579
2580 /* Detach a target after doing deferred register stores. */
2581
2582 void
2583 target_detach (const char *args, int from_tty)
2584 {
2585 struct target_ops* t;
2586
2587 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2588 /* Don't remove global breakpoints here. They're removed on
2589 disconnection from the target. */
2590 ;
2591 else
2592 /* If we're in breakpoints-always-inserted mode, have to remove
2593 them before detaching. */
2594 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2595
2596 prepare_for_detach ();
2597
2598 current_target.to_detach (&current_target, args, from_tty);
2599 if (targetdebug)
2600 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2601 args, from_tty);
2602 }
2603
2604 void
2605 target_disconnect (char *args, int from_tty)
2606 {
2607 struct target_ops *t;
2608
2609 /* If we're in breakpoints-always-inserted mode or if breakpoints
2610 are global across processes, we have to remove them before
2611 disconnecting. */
2612 remove_breakpoints ();
2613
2614 for (t = current_target.beneath; t != NULL; t = t->beneath)
2615 if (t->to_disconnect != NULL)
2616 {
2617 if (targetdebug)
2618 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2619 args, from_tty);
2620 t->to_disconnect (t, args, from_tty);
2621 return;
2622 }
2623
2624 tcomplain ();
2625 }
2626
2627 ptid_t
2628 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2629 {
2630 struct target_ops *t;
2631 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2632 status, options);
2633
2634 if (targetdebug)
2635 {
2636 char *status_string;
2637 char *options_string;
2638
2639 status_string = target_waitstatus_to_string (status);
2640 options_string = target_options_to_string (options);
2641 fprintf_unfiltered (gdb_stdlog,
2642 "target_wait (%d, status, options={%s})"
2643 " = %d, %s\n",
2644 ptid_get_pid (ptid), options_string,
2645 ptid_get_pid (retval), status_string);
2646 xfree (status_string);
2647 xfree (options_string);
2648 }
2649
2650 return retval;
2651 }
2652
2653 char *
2654 target_pid_to_str (ptid_t ptid)
2655 {
2656 struct target_ops *t;
2657
2658 for (t = current_target.beneath; t != NULL; t = t->beneath)
2659 {
2660 if (t->to_pid_to_str != NULL)
2661 return (*t->to_pid_to_str) (t, ptid);
2662 }
2663
2664 return normal_pid_to_str (ptid);
2665 }
2666
2667 char *
2668 target_thread_name (struct thread_info *info)
2669 {
2670 return current_target.to_thread_name (&current_target, info);
2671 }
2672
2673 void
2674 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2675 {
2676 struct target_ops *t;
2677
2678 target_dcache_invalidate ();
2679
2680 current_target.to_resume (&current_target, ptid, step, signal);
2681 if (targetdebug)
2682 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2683 ptid_get_pid (ptid),
2684 step ? "step" : "continue",
2685 gdb_signal_to_name (signal));
2686
2687 registers_changed_ptid (ptid);
2688 set_executing (ptid, 1);
2689 set_running (ptid, 1);
2690 clear_inline_frame_state (ptid);
2691 }
2692
2693 void
2694 target_pass_signals (int numsigs, unsigned char *pass_signals)
2695 {
2696 struct target_ops *t;
2697
2698 for (t = current_target.beneath; t != NULL; t = t->beneath)
2699 {
2700 if (t->to_pass_signals != NULL)
2701 {
2702 if (targetdebug)
2703 {
2704 int i;
2705
2706 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2707 numsigs);
2708
2709 for (i = 0; i < numsigs; i++)
2710 if (pass_signals[i])
2711 fprintf_unfiltered (gdb_stdlog, " %s",
2712 gdb_signal_to_name (i));
2713
2714 fprintf_unfiltered (gdb_stdlog, " })\n");
2715 }
2716
2717 (*t->to_pass_signals) (t, numsigs, pass_signals);
2718 return;
2719 }
2720 }
2721 }
2722
2723 void
2724 target_program_signals (int numsigs, unsigned char *program_signals)
2725 {
2726 struct target_ops *t;
2727
2728 for (t = current_target.beneath; t != NULL; t = t->beneath)
2729 {
2730 if (t->to_program_signals != NULL)
2731 {
2732 if (targetdebug)
2733 {
2734 int i;
2735
2736 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2737 numsigs);
2738
2739 for (i = 0; i < numsigs; i++)
2740 if (program_signals[i])
2741 fprintf_unfiltered (gdb_stdlog, " %s",
2742 gdb_signal_to_name (i));
2743
2744 fprintf_unfiltered (gdb_stdlog, " })\n");
2745 }
2746
2747 (*t->to_program_signals) (t, numsigs, program_signals);
2748 return;
2749 }
2750 }
2751 }
2752
2753 /* Look through the list of possible targets for a target that can
2754 follow forks. */
2755
2756 int
2757 target_follow_fork (int follow_child, int detach_fork)
2758 {
2759 struct target_ops *t;
2760
2761 for (t = current_target.beneath; t != NULL; t = t->beneath)
2762 {
2763 if (t->to_follow_fork != NULL)
2764 {
2765 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2766
2767 if (targetdebug)
2768 fprintf_unfiltered (gdb_stdlog,
2769 "target_follow_fork (%d, %d) = %d\n",
2770 follow_child, detach_fork, retval);
2771 return retval;
2772 }
2773 }
2774
2775 /* Some target returned a fork event, but did not know how to follow it. */
2776 internal_error (__FILE__, __LINE__,
2777 _("could not find a target to follow fork"));
2778 }
2779
2780 void
2781 target_mourn_inferior (void)
2782 {
2783 struct target_ops *t;
2784
2785 for (t = current_target.beneath; t != NULL; t = t->beneath)
2786 {
2787 if (t->to_mourn_inferior != NULL)
2788 {
2789 t->to_mourn_inferior (t);
2790 if (targetdebug)
2791 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2792
2793 /* We no longer need to keep handles on any of the object files.
2794 Make sure to release them to avoid unnecessarily locking any
2795 of them while we're not actually debugging. */
2796 bfd_cache_close_all ();
2797
2798 return;
2799 }
2800 }
2801
2802 internal_error (__FILE__, __LINE__,
2803 _("could not find a target to follow mourn inferior"));
2804 }
2805
2806 /* Look for a target which can describe architectural features, starting
2807 from TARGET. If we find one, return its description. */
2808
2809 const struct target_desc *
2810 target_read_description (struct target_ops *target)
2811 {
2812 struct target_ops *t;
2813
2814 for (t = target; t != NULL; t = t->beneath)
2815 if (t->to_read_description != NULL)
2816 {
2817 const struct target_desc *tdesc;
2818
2819 tdesc = t->to_read_description (t);
2820 if (tdesc)
2821 return tdesc;
2822 }
2823
2824 return NULL;
2825 }
2826
2827 /* The default implementation of to_search_memory.
2828 This implements a basic search of memory, reading target memory and
2829 performing the search here (as opposed to performing the search in on the
2830 target side with, for example, gdbserver). */
2831
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so allocation failure can be reported
     as a search error instead of aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2934
2935 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2936 sequence of bytes in PATTERN with length PATTERN_LEN.
2937
2938 The result is 1 if found, 0 if not found, and -1 if there was an error
2939 requiring halting of the search (e.g. memory read error).
2940 If the pattern is found the address is recorded in FOUND_ADDRP. */
2941
2942 int
2943 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2944 const gdb_byte *pattern, ULONGEST pattern_len,
2945 CORE_ADDR *found_addrp)
2946 {
2947 struct target_ops *t;
2948 int found;
2949
2950 /* We don't use INHERIT to set current_target.to_search_memory,
2951 so we have to scan the target stack and handle targetdebug
2952 ourselves. */
2953
2954 if (targetdebug)
2955 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2956 hex_string (start_addr));
2957
2958 for (t = current_target.beneath; t != NULL; t = t->beneath)
2959 if (t->to_search_memory != NULL)
2960 break;
2961
2962 if (t != NULL)
2963 {
2964 found = t->to_search_memory (t, start_addr, search_space_len,
2965 pattern, pattern_len, found_addrp);
2966 }
2967 else
2968 {
2969 /* If a special version of to_search_memory isn't available, use the
2970 simple version. */
2971 found = simple_search_memory (current_target.beneath,
2972 start_addr, search_space_len,
2973 pattern, pattern_len, found_addrp);
2974 }
2975
2976 if (targetdebug)
2977 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2978
2979 return found;
2980 }
2981
2982 /* Look through the currently pushed targets. If none of them will
2983 be able to restart the currently running process, issue an error
2984 message. */
2985
2986 void
2987 target_require_runnable (void)
2988 {
2989 struct target_ops *t;
2990
2991 for (t = target_stack; t != NULL; t = t->beneath)
2992 {
2993 /* If this target knows how to create a new program, then
2994 assume we will still be able to after killing the current
2995 one. Either killing and mourning will not pop T, or else
2996 find_default_run_target will find it again. */
2997 if (t->to_create_inferior != NULL)
2998 return;
2999
3000 /* Do not worry about thread_stratum targets that can not
3001 create inferiors. Assume they will be pushed again if
3002 necessary, and continue to the process_stratum. */
3003 if (t->to_stratum == thread_stratum
3004 || t->to_stratum == arch_stratum)
3005 continue;
3006
3007 error (_("The \"%s\" target does not support \"run\". "
3008 "Try \"help target\" or \"continue\"."),
3009 t->to_shortname);
3010 }
3011
3012 /* This function is only called if the target is running. In that
3013 case there should have been a process_stratum target and it
3014 should either know how to create inferiors, or not... */
3015 internal_error (__FILE__, __LINE__, _("No targets found"));
3016 }
3017
3018 /* Look through the list of possible targets for a target that can
3019 execute a run or attach command without any other data. This is
3020 used to locate the default process stratum.
3021
3022 If DO_MESG is not NULL, the result is always valid (error() is
3023 called for errors); else, return NULL on error. */
3024
3025 static struct target_ops *
3026 find_default_run_target (char *do_mesg)
3027 {
3028 struct target_ops **t;
3029 struct target_ops *runable = NULL;
3030 int count;
3031
3032 count = 0;
3033
3034 for (t = target_structs; t < target_structs + target_struct_size;
3035 ++t)
3036 {
3037 if ((*t)->to_can_run && target_can_run (*t))
3038 {
3039 runable = *t;
3040 ++count;
3041 }
3042 }
3043
3044 if (count != 1)
3045 {
3046 if (do_mesg)
3047 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3048 else
3049 return NULL;
3050 }
3051
3052 return runable;
3053 }
3054
3055 void
3056 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3057 {
3058 struct target_ops *t;
3059
3060 t = find_default_run_target ("attach");
3061 (t->to_attach) (t, args, from_tty);
3062 return;
3063 }
3064
3065 void
3066 find_default_create_inferior (struct target_ops *ops,
3067 char *exec_file, char *allargs, char **env,
3068 int from_tty)
3069 {
3070 struct target_ops *t;
3071
3072 t = find_default_run_target ("run");
3073 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3074 return;
3075 }
3076
3077 static int
3078 find_default_can_async_p (struct target_ops *ignore)
3079 {
3080 struct target_ops *t;
3081
3082 /* This may be called before the target is pushed on the stack;
3083 look for the default process stratum. If there's none, gdb isn't
3084 configured with a native debugger, and target remote isn't
3085 connected yet. */
3086 t = find_default_run_target (NULL);
3087 if (t && t->to_can_async_p != delegate_can_async_p)
3088 return (t->to_can_async_p) (t);
3089 return 0;
3090 }
3091
3092 static int
3093 find_default_is_async_p (struct target_ops *ignore)
3094 {
3095 struct target_ops *t;
3096
3097 /* This may be called before the target is pushed on the stack;
3098 look for the default process stratum. If there's none, gdb isn't
3099 configured with a native debugger, and target remote isn't
3100 connected yet. */
3101 t = find_default_run_target (NULL);
3102 if (t && t->to_is_async_p != delegate_is_async_p)
3103 return (t->to_is_async_p) (t);
3104 return 0;
3105 }
3106
3107 static int
3108 find_default_supports_non_stop (struct target_ops *self)
3109 {
3110 struct target_ops *t;
3111
3112 t = find_default_run_target (NULL);
3113 if (t && t->to_supports_non_stop)
3114 return (t->to_supports_non_stop) (t);
3115 return 0;
3116 }
3117
3118 int
3119 target_supports_non_stop (void)
3120 {
3121 struct target_ops *t;
3122
3123 for (t = &current_target; t != NULL; t = t->beneath)
3124 if (t->to_supports_non_stop)
3125 return t->to_supports_non_stop (t);
3126
3127 return 0;
3128 }
3129
3130 /* Implement the "info proc" command. */
3131
3132 int
3133 target_info_proc (char *args, enum info_proc_what what)
3134 {
3135 struct target_ops *t;
3136
3137 /* If we're already connected to something that can get us OS
3138 related data, use it. Otherwise, try using the native
3139 target. */
3140 if (current_target.to_stratum >= process_stratum)
3141 t = current_target.beneath;
3142 else
3143 t = find_default_run_target (NULL);
3144
3145 for (; t != NULL; t = t->beneath)
3146 {
3147 if (t->to_info_proc != NULL)
3148 {
3149 t->to_info_proc (t, args, what);
3150
3151 if (targetdebug)
3152 fprintf_unfiltered (gdb_stdlog,
3153 "target_info_proc (\"%s\", %d)\n", args, what);
3154
3155 return 1;
3156 }
3157 }
3158
3159 return 0;
3160 }
3161
3162 static int
3163 find_default_supports_disable_randomization (struct target_ops *self)
3164 {
3165 struct target_ops *t;
3166
3167 t = find_default_run_target (NULL);
3168 if (t && t->to_supports_disable_randomization)
3169 return (t->to_supports_disable_randomization) (t);
3170 return 0;
3171 }
3172
3173 int
3174 target_supports_disable_randomization (void)
3175 {
3176 struct target_ops *t;
3177
3178 for (t = &current_target; t != NULL; t = t->beneath)
3179 if (t->to_supports_disable_randomization)
3180 return t->to_supports_disable_randomization (t);
3181
3182 return 0;
3183 }
3184
3185 char *
3186 target_get_osdata (const char *type)
3187 {
3188 struct target_ops *t;
3189
3190 /* If we're already connected to something that can get us OS
3191 related data, use it. Otherwise, try using the native
3192 target. */
3193 if (current_target.to_stratum >= process_stratum)
3194 t = current_target.beneath;
3195 else
3196 t = find_default_run_target ("get OS data");
3197
3198 if (!t)
3199 return NULL;
3200
3201 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3202 }
3203
/* Determine the current address space of thread PTID.  Never returns
   NULL: either some target in the stack answers, or we fall back to
   the inferior's main address space, or we raise an internal
   error.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target in the stack that can map a thread to an
     address space.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* Implementations are required to return a valid address
	     space, never NULL.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3240
3241
3242 /* Target file operations. */
3243
3244 static struct target_ops *
3245 default_fileio_target (void)
3246 {
3247 /* If we're already connected to something that can perform
3248 file I/O, use it. Otherwise, try using the native target. */
3249 if (current_target.to_stratum >= process_stratum)
3250 return current_target.beneath;
3251 else
3252 return find_default_run_target ("file I/O");
3253 }
3254
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  Delegates to the first target in the stack that
   implements to_fileio_open.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Write up to LEN bytes from WRITE_BUF to FD on the target.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read up to LEN bytes FD on the target into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  Caller owns the returned
   string.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3419
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error it reports.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int *fdp = (int *) opaque;
  int ignored_errno;

  target_fileio_close (*fdp, &ignored_errno);
}
3428
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On failure, or when the file is empty, *BUF_P is left unset and
   any allocated buffer is freed; the caller owns *BUF_P only when
   the return value is positive.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure the descriptor is closed on any exit path, including
     a QUIT thrown from the read loop below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes of headroom at the end of the buffer for
	 the caller (e.g. for a trailing NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3492
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  Thin wrapper
   around target_fileio_read_alloc_1 requesting no padding bytes.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3502
3503 /* Read target file FILENAME. The result is NUL-terminated and
3504 returned as a string, allocated using xmalloc. If an error occurs
3505 or the transfer is unsupported, NULL is returned. Empty objects
3506 are returned as allocated but empty strings. A warning is issued
3507 if the result contains any embedded NUL bytes. */
3508
3509 char *
3510 target_fileio_read_stralloc (const char *filename)
3511 {
3512 gdb_byte *buffer;
3513 char *bufstr;
3514 LONGEST i, transferred;
3515
3516 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3517 bufstr = (char *) buffer;
3518
3519 if (transferred < 0)
3520 return NULL;
3521
3522 if (transferred == 0)
3523 return xstrdup ("");
3524
3525 bufstr[transferred] = 0;
3526
3527 /* Check for embedded NUL bytes; but allow trailing NULs. */
3528 for (i = strlen (bufstr); i < transferred; i++)
3529 if (bufstr[i] != 0)
3530 {
3531 warning (_("target file %s "
3532 "contained unexpected null characters"),
3533 filename);
3534 break;
3535 }
3536
3537 return bufstr;
3538 }
3539
3540
3541 static int
3542 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3543 CORE_ADDR addr, int len)
3544 {
3545 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3546 }
3547
3548 static int
3549 default_watchpoint_addr_within_range (struct target_ops *target,
3550 CORE_ADDR addr,
3551 CORE_ADDR start, int length)
3552 {
3553 return addr >= start && addr < start + length;
3554 }
3555
/* Default to_thread_architecture: all threads share the main target
   architecture; per-thread architectures are a target-specific
   refinement.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3561
/* Stub that always returns 0.  Installed (via function-pointer casts)
   as the default for various predicate methods, e.g. in
   init_dummy_target below.  */

static int
return_zero (void)
{
  return 0;
}

/* Stub that always returns -1; presumably used as a default method
   elsewhere in this file.  */

static int
return_minus_one (void)
{
  return -1;
}

/* Stub that always returns a null pointer; presumably used as a
   default method elsewhere in this file.  */

static void *
return_null (void)
{
  return 0;
}
3579
/* Find the next target down the stack from the specified target T.
   Returns NULL when T is at the bottom of the stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3589
3590 /* See target.h. */
3591
3592 struct target_ops *
3593 find_target_at (enum strata stratum)
3594 {
3595 struct target_ops *t;
3596
3597 for (t = current_target.beneath; t != NULL; t = t->beneath)
3598 if (t->to_stratum == stratum)
3599 return t;
3600
3601 return NULL;
3602 }
3603
3604 \f
/* The inferior process has died.  Long live the inferior!  Shared
   tear-down used by targets when the inferior exits: clears
   inferior_ptid, deletes the inferior's threads and breakpoint
   state, and resets caches.  The statement order below is
   deliberate.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save the ptid and clear the global before doing anything that
     might look at inferior_ptid.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3639 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, overwritten by the next call — callers must copy
   the result if they need it to persist.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3651
/* to_pid_to_str method for the dummy target: just use the generic
   "process NNN" rendering.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3657
/* Error-catcher for target_find_memory_regions: installed in the
   dummy target so the command fails cleanly instead of crashing when
   no real target is connected.  Raises an error; never returns 0.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes; raises an error,
   never actually returns.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}

/* Error-catcher for target_get_bookmark; tcomplain does not
   return.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}

/* Error-catcher for target_goto_bookmark; tcomplain does not
   return.  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3690
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the stack and
   provides safe fallbacks when nothing real is connected.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* The casts adapt the argument-less return_zero stub to each
     method's actual signature; the extra arguments are simply
     ignored.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining NULL method slot with a default.  */
  install_dummy_methods (&dummy_target);
}
3720 \f
/* Debug wrapper for to_open: call the real method saved in
   debug_target, then log the call.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3728
/* Close TARG, which must already have been popped off the target
   stack.  Prefers the to_xclose method (which also deallocates the
   target) over plain to_close; either may be absent.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3742
/* Attach to a process specified by ARGS, delegating to the current
   target's to_attach method, and log the call when target debugging
   is on.  */

void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3751
3752 int
3753 target_thread_alive (ptid_t ptid)
3754 {
3755 struct target_ops *t;
3756
3757 for (t = current_target.beneath; t != NULL; t = t->beneath)
3758 {
3759 if (t->to_thread_alive != NULL)
3760 {
3761 int retval;
3762
3763 retval = t->to_thread_alive (t, ptid);
3764 if (targetdebug)
3765 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3766 ptid_get_pid (ptid), retval);
3767
3768 return retval;
3769 }
3770 }
3771
3772 return 0;
3773 }
3774
3775 void
3776 target_find_new_threads (void)
3777 {
3778 struct target_ops *t;
3779
3780 for (t = current_target.beneath; t != NULL; t = t->beneath)
3781 {
3782 if (t->to_find_new_threads != NULL)
3783 {
3784 t->to_find_new_threads (t);
3785 if (targetdebug)
3786 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3787
3788 return;
3789 }
3790 }
3791 }
3792
3793 void
3794 target_stop (ptid_t ptid)
3795 {
3796 if (!may_stop)
3797 {
3798 warning (_("May not interrupt or stop the target, ignoring attempt"));
3799 return;
3800 }
3801
3802 (*current_target.to_stop) (&current_target, ptid);
3803 }
3804
/* Debug wrapper for to_post_attach: call the real method saved in
   debug_target, then log the call.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3812
3813 /* Concatenate ELEM to LIST, a comma separate list, and return the
3814 result. The LIST incoming argument is released. */
3815
3816 static char *
3817 str_comma_list_concat_elem (char *list, const char *elem)
3818 {
3819 if (list == NULL)
3820 return xstrdup (elem);
3821 else
3822 return reconcat (list, list, ", ", elem, (char *) NULL);
3823 }
3824
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (string version of OPT) to RET and
   remove OPT from TARGET_OPTIONS.  Returns the new resulting
   string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int opt_is_set = (*target_options & opt) != 0;

  if (opt_is_set)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3842
/* Render TARGET_OPTIONS (a TARGET_* flags mask) as a human-readable,
   comma-separated string.  The result is xmalloc'd; caller frees.
   Any bits not covered below show up as "unknown???".  */

char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* DO_TARG_OPTION stringizes the flag name and clears the bit from
     the local copy, so leftover bits can be detected afterwards.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3860
/* Log one register access to gdb_stdlog on behalf of FUNC: the
   register's name (or number), its raw bytes in hex, and — when it
   fits in a LONGEST — its value as address and decimal.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when there is a non-empty
     one; fall back to the raw number (also used for pseudo/invalid
     register numbers).  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Also print the value numerically when it fits in a
	 LONGEST.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3897
3898 void
3899 target_fetch_registers (struct regcache *regcache, int regno)
3900 {
3901 struct target_ops *t;
3902
3903 for (t = current_target.beneath; t != NULL; t = t->beneath)
3904 {
3905 if (t->to_fetch_registers != NULL)
3906 {
3907 t->to_fetch_registers (t, regcache, regno);
3908 if (targetdebug)
3909 debug_print_register ("target_fetch_registers", regcache, regno);
3910 return;
3911 }
3912 }
3913 }
3914
3915 void
3916 target_store_registers (struct regcache *regcache, int regno)
3917 {
3918 struct target_ops *t;
3919
3920 if (!may_write_registers)
3921 error (_("Writing to registers is not allowed (regno %d)"), regno);
3922
3923 current_target.to_store_registers (&current_target, regcache, regno);
3924 if (targetdebug)
3925 {
3926 debug_print_register ("target_store_registers", regcache, regno);
3927 }
3928 }
3929
3930 int
3931 target_core_of_thread (ptid_t ptid)
3932 {
3933 struct target_ops *t;
3934
3935 for (t = current_target.beneath; t != NULL; t = t->beneath)
3936 {
3937 if (t->to_core_of_thread != NULL)
3938 {
3939 int retval = t->to_core_of_thread (t, ptid);
3940
3941 if (targetdebug)
3942 fprintf_unfiltered (gdb_stdlog,
3943 "target_core_of_thread (%d) = %d\n",
3944 ptid_get_pid (ptid), retval);
3945 return retval;
3946 }
3947 }
3948
3949 return -1;
3950 }
3951
/* Compare SIZE bytes at MEMADDR in target memory against DATA,
   delegating to the first target that implements to_verify_memory.
   Errors out (tcomplain is declared ATTRIBUTE_NORETURN, so the
   missing return after it is fine) when no target supports it.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  tcomplain ();
}
3975
/* The documentation for this function is in its prototype declaration in
   target.h.  Inserts a masked hardware watchpoint; returns 1 (the
   failure convention here) when no target in the stack supports
   masked watchpoints.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}

/* The documentation for this function is in its prototype declaration in
   target.h.  Mirror of the above for removal; same return
   convention.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
4029
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target in the stack supports
   masked watchpoints.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}

/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target in the stack supports
   ranged breakpoints.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
4059
/* See target.h.  Enable branch tracing for PTID via the first target
   in the stack that supports it; errors out otherwise (tcomplain
   does not return).  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  return NULL;
}

/* See target.h.  Disable branch tracing described by BTINFO; errors
   out if no target supports it.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  Tear down branch tracing state BTINFO; errors out
   if no target supports it.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}

/* See target.h.  Read branch trace data into *BTRACE; errors out if
   no target supports it (the return after tcomplain is never
   reached).  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4125
/* See target.h.  Stop any active recording; quietly does nothing
   when no target supports it (unlike most delegators below, which
   complain).  */

void
target_stop_recording (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_stop_recording != NULL)
      {
	t->to_stop_recording (t);
	return;
      }

  /* This is optional.  */
}

/* See target.h.  Print information about the recording; errors out
   if no target supports it.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Save the recorded execution to FILENAME; errors out
   if no target supports it.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (t, filename);
	return;
      }

  tcomplain ();
}

/* See target.h.  Nonzero iff some target in the stack implements
   to_delete_record; does not actually delete anything.  */

int
target_supports_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      return 1;

  return 0;
}

/* See target.h.  Delete the recorded execution; errors out if no
   target supports it.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Nonzero iff the target is currently replaying a
   recording; zero when no target supports replay.  */

int
target_record_is_replaying (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_record_is_replaying != NULL)
      return t->to_record_is_replaying (t);

  return 0;
}
4221
/* See target.h.  Go to the beginning of the recorded execution;
   errors out if no target supports it.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Go to the end of the recorded execution; errors out
   if no target supports it.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end (t);
	return;
      }

  tcomplain ();
}

/* See target.h.  Go to instruction number INSN in the recorded
   execution; errors out if no target supports it.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (t, insn);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the instruction history; errors out if no
   target supports it.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the instruction history starting at FROM;
   errors out if no target supports it.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (t, from, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the instruction history between BEGIN and
   END; errors out if no target supports it.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the function-call history; errors out if no
   target supports it.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (t, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the function-call history starting at BEGIN;
   errors out if no target supports it.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (t, begin, size, flags);
	return;
      }

  tcomplain ();
}

/* See target.h.  Print the function-call history between BEGIN and
   END; errors out if no target supports it.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4374
/* Debug wrapper for to_prepare_to_store: call the real method saved
   in debug_target, then log the call.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4382
/* See target.h.  Return the first non-NULL to_get_unwinder slot in
   the stack (the slot holds the unwinder directly, it is not a
   method to call), or NULL.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}

/* See target.h.  Likewise for the tail-call frame unwinder.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4410
4411 /* See target.h. */
4412
4413 CORE_ADDR
4414 forward_target_decr_pc_after_break (struct target_ops *ops,
4415 struct gdbarch *gdbarch)
4416 {
4417 for (; ops != NULL; ops = ops->beneath)
4418 if (ops->to_decr_pc_after_break != NULL)
4419 return ops->to_decr_pc_after_break (ops, gdbarch);
4420
4421 return gdbarch_decr_pc_after_break (gdbarch);
4422 }
4423
/* See target.h.  Convenience wrapper that starts the search at the
   top of the current target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4431
/* Debug wrapper for the deprecated_xfer_memory method: perform the
   transfer via debug_target, then log the call and (on success) a
   hex dump of the bytes involved.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into 16-byte lines, keyed off the buffer
	     address; at targetdebug < 2 only the first line is
	     printed, followed by "...".  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4472
/* Debug wrapper for to_files_info: call the real method saved in
   debug_target, then log the call.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4480
4481 static int
4482 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4483 struct bp_target_info *bp_tgt)
4484 {
4485 int retval;
4486
4487 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4488
4489 fprintf_unfiltered (gdb_stdlog,
4490 "target_insert_breakpoint (%s, xxx) = %ld\n",
4491 core_addr_to_string (bp_tgt->placed_address),
4492 (unsigned long) retval);
4493 return retval;
4494 }
4495
4496 static int
4497 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4498 struct bp_target_info *bp_tgt)
4499 {
4500 int retval;
4501
4502 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4503
4504 fprintf_unfiltered (gdb_stdlog,
4505 "target_remove_breakpoint (%s, xxx) = %ld\n",
4506 core_addr_to_string (bp_tgt->placed_address),
4507 (unsigned long) retval);
4508 return retval;
4509 }
4510
4511 static int
4512 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4513 int type, int cnt, int from_tty)
4514 {
4515 int retval;
4516
4517 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4518 type, cnt, from_tty);
4519
4520 fprintf_unfiltered (gdb_stdlog,
4521 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4522 (unsigned long) type,
4523 (unsigned long) cnt,
4524 (unsigned long) from_tty,
4525 (unsigned long) retval);
4526 return retval;
4527 }
4528
4529 static int
4530 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4531 CORE_ADDR addr, int len)
4532 {
4533 CORE_ADDR retval;
4534
4535 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4536 addr, len);
4537
4538 fprintf_unfiltered (gdb_stdlog,
4539 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4540 core_addr_to_string (addr), (unsigned long) len,
4541 core_addr_to_string (retval));
4542 return retval;
4543 }
4544
4545 static int
4546 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4547 CORE_ADDR addr, int len, int rw,
4548 struct expression *cond)
4549 {
4550 int retval;
4551
4552 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4553 addr, len,
4554 rw, cond);
4555
4556 fprintf_unfiltered (gdb_stdlog,
4557 "target_can_accel_watchpoint_condition "
4558 "(%s, %d, %d, %s) = %ld\n",
4559 core_addr_to_string (addr), len, rw,
4560 host_address_to_string (cond), (unsigned long) retval);
4561 return retval;
4562 }
4563
4564 static int
4565 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4566 {
4567 int retval;
4568
4569 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4570
4571 fprintf_unfiltered (gdb_stdlog,
4572 "target_stopped_by_watchpoint () = %ld\n",
4573 (unsigned long) retval);
4574 return retval;
4575 }
4576
4577 static int
4578 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4579 {
4580 int retval;
4581
4582 retval = debug_target.to_stopped_data_address (target, addr);
4583
4584 fprintf_unfiltered (gdb_stdlog,
4585 "target_stopped_data_address ([%s]) = %ld\n",
4586 core_addr_to_string (*addr),
4587 (unsigned long)retval);
4588 return retval;
4589 }
4590
4591 static int
4592 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4593 CORE_ADDR addr,
4594 CORE_ADDR start, int length)
4595 {
4596 int retval;
4597
4598 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4599 start, length);
4600
4601 fprintf_filtered (gdb_stdlog,
4602 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4603 core_addr_to_string (addr), core_addr_to_string (start),
4604 length, retval);
4605 return retval;
4606 }
4607
4608 static int
4609 debug_to_insert_hw_breakpoint (struct target_ops *self,
4610 struct gdbarch *gdbarch,
4611 struct bp_target_info *bp_tgt)
4612 {
4613 int retval;
4614
4615 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4616 gdbarch, bp_tgt);
4617
4618 fprintf_unfiltered (gdb_stdlog,
4619 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4620 core_addr_to_string (bp_tgt->placed_address),
4621 (unsigned long) retval);
4622 return retval;
4623 }
4624
4625 static int
4626 debug_to_remove_hw_breakpoint (struct target_ops *self,
4627 struct gdbarch *gdbarch,
4628 struct bp_target_info *bp_tgt)
4629 {
4630 int retval;
4631
4632 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4633 gdbarch, bp_tgt);
4634
4635 fprintf_unfiltered (gdb_stdlog,
4636 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4637 core_addr_to_string (bp_tgt->placed_address),
4638 (unsigned long) retval);
4639 return retval;
4640 }
4641
4642 static int
4643 debug_to_insert_watchpoint (struct target_ops *self,
4644 CORE_ADDR addr, int len, int type,
4645 struct expression *cond)
4646 {
4647 int retval;
4648
4649 retval = debug_target.to_insert_watchpoint (&debug_target,
4650 addr, len, type, cond);
4651
4652 fprintf_unfiltered (gdb_stdlog,
4653 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4654 core_addr_to_string (addr), len, type,
4655 host_address_to_string (cond), (unsigned long) retval);
4656 return retval;
4657 }
4658
4659 static int
4660 debug_to_remove_watchpoint (struct target_ops *self,
4661 CORE_ADDR addr, int len, int type,
4662 struct expression *cond)
4663 {
4664 int retval;
4665
4666 retval = debug_target.to_remove_watchpoint (&debug_target,
4667 addr, len, type, cond);
4668
4669 fprintf_unfiltered (gdb_stdlog,
4670 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4671 core_addr_to_string (addr), len, type,
4672 host_address_to_string (cond), (unsigned long) retval);
4673 return retval;
4674 }
4675
4676 static void
4677 debug_to_terminal_init (struct target_ops *self)
4678 {
4679 debug_target.to_terminal_init (&debug_target);
4680
4681 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4682 }
4683
4684 static void
4685 debug_to_terminal_inferior (struct target_ops *self)
4686 {
4687 debug_target.to_terminal_inferior (&debug_target);
4688
4689 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4690 }
4691
4692 static void
4693 debug_to_terminal_ours_for_output (struct target_ops *self)
4694 {
4695 debug_target.to_terminal_ours_for_output (&debug_target);
4696
4697 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4698 }
4699
4700 static void
4701 debug_to_terminal_ours (struct target_ops *self)
4702 {
4703 debug_target.to_terminal_ours (&debug_target);
4704
4705 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4706 }
4707
4708 static void
4709 debug_to_terminal_save_ours (struct target_ops *self)
4710 {
4711 debug_target.to_terminal_save_ours (&debug_target);
4712
4713 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4714 }
4715
4716 static void
4717 debug_to_terminal_info (struct target_ops *self,
4718 const char *arg, int from_tty)
4719 {
4720 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4721
4722 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4723 from_tty);
4724 }
4725
4726 static void
4727 debug_to_load (struct target_ops *self, char *args, int from_tty)
4728 {
4729 debug_target.to_load (&debug_target, args, from_tty);
4730
4731 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4732 }
4733
4734 static void
4735 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4736 {
4737 debug_target.to_post_startup_inferior (&debug_target, ptid);
4738
4739 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4740 ptid_get_pid (ptid));
4741 }
4742
4743 static int
4744 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4745 {
4746 int retval;
4747
4748 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4749
4750 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4751 pid, retval);
4752
4753 return retval;
4754 }
4755
4756 static int
4757 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4758 {
4759 int retval;
4760
4761 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4762
4763 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4764 pid, retval);
4765
4766 return retval;
4767 }
4768
4769 static int
4770 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4771 {
4772 int retval;
4773
4774 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4775
4776 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4777 pid, retval);
4778
4779 return retval;
4780 }
4781
4782 static int
4783 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4784 {
4785 int retval;
4786
4787 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4788
4789 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4790 pid, retval);
4791
4792 return retval;
4793 }
4794
4795 static int
4796 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4797 {
4798 int retval;
4799
4800 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4801
4802 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4803 pid, retval);
4804
4805 return retval;
4806 }
4807
4808 static int
4809 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4810 {
4811 int retval;
4812
4813 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4814
4815 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4816 pid, retval);
4817
4818 return retval;
4819 }
4820
4821 static int
4822 debug_to_has_exited (struct target_ops *self,
4823 int pid, int wait_status, int *exit_status)
4824 {
4825 int has_exited;
4826
4827 has_exited = debug_target.to_has_exited (&debug_target,
4828 pid, wait_status, exit_status);
4829
4830 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4831 pid, wait_status, *exit_status, has_exited);
4832
4833 return has_exited;
4834 }
4835
4836 static int
4837 debug_to_can_run (struct target_ops *self)
4838 {
4839 int retval;
4840
4841 retval = debug_target.to_can_run (&debug_target);
4842
4843 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4844
4845 return retval;
4846 }
4847
4848 static struct gdbarch *
4849 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4850 {
4851 struct gdbarch *retval;
4852
4853 retval = debug_target.to_thread_architecture (ops, ptid);
4854
4855 fprintf_unfiltered (gdb_stdlog,
4856 "target_thread_architecture (%s) = %s [%s]\n",
4857 target_pid_to_str (ptid),
4858 host_address_to_string (retval),
4859 gdbarch_bfd_arch_info (retval)->printable_name);
4860 return retval;
4861 }
4862
4863 static void
4864 debug_to_stop (struct target_ops *self, ptid_t ptid)
4865 {
4866 debug_target.to_stop (&debug_target, ptid);
4867
4868 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4869 target_pid_to_str (ptid));
4870 }
4871
4872 static void
4873 debug_to_rcmd (struct target_ops *self, char *command,
4874 struct ui_file *outbuf)
4875 {
4876 debug_target.to_rcmd (&debug_target, command, outbuf);
4877 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4878 }
4879
4880 static char *
4881 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4882 {
4883 char *exec_file;
4884
4885 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4886
4887 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4888 pid, exec_file);
4889
4890 return exec_file;
4891 }
4892
/* Install the debug-trace wrappers.  First snapshot the current
   target vector into DEBUG_TARGET (so each wrapper can delegate to
   the real method), then overwrite the listed methods of
   CURRENT_TARGET with their debug_to_* counterparts.  Only the
   methods assigned below are traced; all others run unwrapped.
   Called when target debugging is enabled (see update_current_target
   and the "set debug target" command).  */

static void
setup_target_debug (void)
{
  /* Save the real methods before any are replaced.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4940 \f
4941
/* Help text shared by the "info target" and "info files" commands
   (registered in initialize_targets below).  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4946
/* Default implementation of the to_rcmd target method: targets that
   do not provide a remote monitor reject the "monitor" command with
   an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4952
/* Implement the "monitor" CLI command: forward CMD verbatim to the
   current target's rcmd method, with output sent to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		 int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4959
4960 /* Print the name of each layers of our target stack. */
4961
4962 static void
4963 maintenance_print_target_stack (char *cmd, int from_tty)
4964 {
4965 struct target_ops *t;
4966
4967 printf_filtered (_("The current target stack is:\n"));
4968
4969 for (t = target_stack; t != NULL; t = t->beneath)
4970 {
4971 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4972 }
4973 }
4974
/* Controls if async mode is permitted.  This is the effective value
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  The two are
   reconciled in set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4981
4982 static void
4983 set_target_async_command (char *args, int from_tty,
4984 struct cmd_list_element *c)
4985 {
4986 if (have_live_inferiors ())
4987 {
4988 target_async_permitted_1 = target_async_permitted;
4989 error (_("Cannot change this setting while the inferior is running."));
4990 }
4991
4992 target_async_permitted = target_async_permitted_1;
4993 }
4994
/* "show target-async" callback: report the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  const char *fmt = _("Controlling the inferior in "
		      "asynchronous mode is %s.\n");

  fprintf_filtered (file, fmt, value);
}
5004
/* Temporary copies of permission settings.  The "set may-*" commands
   write these staging variables; they are copied into the real may_*
   flags only when it is safe to do so (see set_target_permissions and
   set_write_memory_permission below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5013
5014 /* Make the user-set values match the real values again. */
5015
5016 void
5017 update_target_permissions (void)
5018 {
5019 may_write_registers_1 = may_write_registers;
5020 may_write_memory_1 = may_write_memory;
5021 may_insert_breakpoints_1 = may_insert_breakpoints;
5022 may_insert_tracepoints_1 = may_insert_tracepoints;
5023 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5024 may_stop_1 = may_stop;
5025 }
5026
5027 /* The one function handles (most of) the permission flags in the same
5028 way. */
5029
5030 static void
5031 set_target_permissions (char *args, int from_tty,
5032 struct cmd_list_element *c)
5033 {
5034 if (target_has_execution)
5035 {
5036 update_target_permissions ();
5037 error (_("Cannot change this setting while the inferior is running."));
5038 }
5039
5040 /* Make the real values match the user-changed values. */
5041 may_write_registers = may_write_registers_1;
5042 may_insert_breakpoints = may_insert_breakpoints_1;
5043 may_insert_tracepoints = may_insert_tracepoints_1;
5044 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5045 may_stop = may_stop_1;
5046 update_observer_mode ();
5047 }
5048
5049 /* Set memory write permission independently of observer mode. */
5050
5051 static void
5052 set_write_memory_permission (char *args, int from_tty,
5053 struct cmd_list_element *c)
5054 {
5055 /* Make the real values match the user-changed values. */
5056 may_write_memory = may_write_memory_1;
5057 update_observer_mode ();
5058 }
5059
5060
/* Module initialization: push the dummy target as the bottom of the
   target stack and register all target-related commands — info
   commands, debug/trust settings, the "monitor" command, the
   maintenance target-stack printer, and the may-* permission
   settings.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of every target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* The setter rejects changes while an inferior is live; see
     set_target_async_command.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags.  Each "set" writes a _1 staging variable; the
     shared setter copies it to the real flag when it is safe.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  /* Memory write permission has its own setter so it can be toggled
     independently of observer mode.  */
  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.167646 seconds and 4 git commands to generate.