1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (const char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_load (char *, int);
146
147 static int debug_to_can_run (void);
148
149 static void debug_to_stop (ptid_t);
150
151 /* Pointer to array of target architecture structures; the number of
152 entries currently in use (which is also the index at which the next
153 one is added); and the allocated size of the array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183 memory breakpoints inserted by gdb. */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189 inadvertent disruption, such as in non-stop mode. */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Non-zero if we want to see trace of target level stuff. */
204
205 static unsigned int targetdebug = 0;
206 static void
207 show_targetdebug (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
209 {
210 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
211 }
212
213 static void setup_target_debug (void);
214
215 /* The "set stack-cache" option sets this. */
216 static int stack_cache_enabled_p_1 = 1;
217 /* And set_stack_cache_enabled_p updates this.
218 The reason for the separation is so that we don't flush the cache for
219 on->on transitions. */
220 static int stack_cache_enabled_p = 1;
221
222 /* This is called *after* the stack-cache has been set.
223 Flush the cache for off->on and on->off transitions.
224 There's no real need to flush the cache for on->off transitions,
225 except cleanliness. */
226
227 static void
228 set_stack_cache_enabled_p (char *args, int from_tty,
229 struct cmd_list_element *c)
230 {
231 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
232 target_dcache_invalidate ();
233
234 stack_cache_enabled_p = stack_cache_enabled_p_1;
235 }
236
237 static void
238 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
239 struct cmd_list_element *c, const char *value)
240 {
241 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
242 }
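/* Wiring sketch (assumption about code later in this file, not shown
   in this excerpt): the two variables above are hooked to the
   user-visible option roughly like

     add_setshow_boolean_cmd ("stack-cache", class_support,
                              &stack_cache_enabled_p_1,
                              _("Set cache use for stack access."),
                              _("Show cache use for stack access."),
                              NULL,
                              set_stack_cache_enabled_p,
                              show_stack_cache_enabled_p,
                              &setlist, &showlist);

   so set_stack_cache_enabled_p runs after the new option value has
   already been stored in stack_cache_enabled_p_1.  */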
243
244 /* Cache of memory operations, to speed up remote access. */
245 static DCACHE *target_dcache;
246
247 /* Invalidate the target dcache. */
248
249 void
250 target_dcache_invalidate (void)
251 {
252 dcache_invalidate (target_dcache);
253 }
254
255 /* The user just typed 'target' without the name of a target. */
256
257 static void
258 target_command (char *arg, int from_tty)
259 {
260 fputs_filtered ("Argument required (target name). Try `help target'\n",
261 gdb_stdout);
262 }
263
264 /* Default target_has_* methods for process_stratum targets. */
265
266 int
267 default_child_has_all_memory (struct target_ops *ops)
268 {
269 /* If no inferior selected, then we can't read memory here. */
270 if (ptid_equal (inferior_ptid, null_ptid))
271 return 0;
272
273 return 1;
274 }
275
276 int
277 default_child_has_memory (struct target_ops *ops)
278 {
279 /* If no inferior selected, then we can't read memory here. */
280 if (ptid_equal (inferior_ptid, null_ptid))
281 return 0;
282
283 return 1;
284 }
285
286 int
287 default_child_has_stack (struct target_ops *ops)
288 {
289 /* If no inferior selected, there's no stack. */
290 if (ptid_equal (inferior_ptid, null_ptid))
291 return 0;
292
293 return 1;
294 }
295
296 int
297 default_child_has_registers (struct target_ops *ops)
298 {
299 /* Can't read registers from no inferior. */
300 if (ptid_equal (inferior_ptid, null_ptid))
301 return 0;
302
303 return 1;
304 }
305
306 int
307 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
308 {
309 /* If there's no thread selected, then we can't make it run through
310 hoops. */
311 if (ptid_equal (the_ptid, null_ptid))
312 return 0;
313
314 return 1;
315 }
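/* Illustrative sketch (not part of the original file): a native,
   process_stratum target would typically reuse the defaults above when
   filling in its target_ops, e.g.:

     t->to_has_all_memory = default_child_has_all_memory;
     t->to_has_memory = default_child_has_memory;
     t->to_has_stack = default_child_has_stack;
     t->to_has_registers = default_child_has_registers;
     t->to_has_execution = default_child_has_execution;

   where T is the backend's statically allocated target_ops; this
   mirrors what inf-child.c does for the native target.  */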
316
317
318 int
319 target_has_all_memory_1 (void)
320 {
321 struct target_ops *t;
322
323 for (t = current_target.beneath; t != NULL; t = t->beneath)
324 if (t->to_has_all_memory (t))
325 return 1;
326
327 return 0;
328 }
329
330 int
331 target_has_memory_1 (void)
332 {
333 struct target_ops *t;
334
335 for (t = current_target.beneath; t != NULL; t = t->beneath)
336 if (t->to_has_memory (t))
337 return 1;
338
339 return 0;
340 }
341
342 int
343 target_has_stack_1 (void)
344 {
345 struct target_ops *t;
346
347 for (t = current_target.beneath; t != NULL; t = t->beneath)
348 if (t->to_has_stack (t))
349 return 1;
350
351 return 0;
352 }
353
354 int
355 target_has_registers_1 (void)
356 {
357 struct target_ops *t;
358
359 for (t = current_target.beneath; t != NULL; t = t->beneath)
360 if (t->to_has_registers (t))
361 return 1;
362
363 return 0;
364 }
365
366 int
367 target_has_execution_1 (ptid_t the_ptid)
368 {
369 struct target_ops *t;
370
371 for (t = current_target.beneath; t != NULL; t = t->beneath)
372 if (t->to_has_execution (t, the_ptid))
373 return 1;
374
375 return 0;
376 }
377
378 int
379 target_has_execution_current (void)
380 {
381 return target_has_execution_1 (inferior_ptid);
382 }
383
384 /* Complete initialization of T. This ensures that various fields in
385 T are set, if needed by the target implementation. */
386
387 void
388 complete_target_initialization (struct target_ops *t)
389 {
390 /* Provide default values for all "must have" methods. */
391 if (t->to_xfer_partial == NULL)
392 t->to_xfer_partial = default_xfer_partial;
393
394 if (t->to_has_all_memory == NULL)
395 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
396
397 if (t->to_has_memory == NULL)
398 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
399
400 if (t->to_has_stack == NULL)
401 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
402
403 if (t->to_has_registers == NULL)
404 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
405
406 if (t->to_has_execution == NULL)
407 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
408 }
409
410 /* Add possible target architecture T to the list and add a new
411 command 'target T->to_shortname'. Set COMPLETER as the command's
412 completer if not NULL. */
413
414 void
415 add_target_with_completer (struct target_ops *t,
416 completer_ftype *completer)
417 {
418 struct cmd_list_element *c;
419
420 complete_target_initialization (t);
421
422 if (!target_structs)
423 {
424 target_struct_allocsize = DEFAULT_ALLOCSIZE;
425 target_structs = (struct target_ops **) xmalloc
426 (target_struct_allocsize * sizeof (*target_structs));
427 }
428 if (target_struct_size >= target_struct_allocsize)
429 {
430 target_struct_allocsize *= 2;
431 target_structs = (struct target_ops **)
432 xrealloc ((char *) target_structs,
433 target_struct_allocsize * sizeof (*target_structs));
434 }
435 target_structs[target_struct_size++] = t;
436
437 if (targetlist == NULL)
438 add_prefix_cmd ("target", class_run, target_command, _("\
439 Connect to a target machine or process.\n\
440 The first argument is the type or protocol of the target machine.\n\
441 Remaining arguments are interpreted by the target protocol. For more\n\
442 information on the arguments for a particular protocol, type\n\
443 `help target ' followed by the protocol name."),
444 &targetlist, "target ", 0, &cmdlist);
445 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
446 &targetlist);
447 if (completer != NULL)
448 set_cmd_completer (c, completer);
449 }
450
451 /* Add a possible target architecture to the list. */
452
453 void
454 add_target (struct target_ops *t)
455 {
456 add_target_with_completer (t, NULL);
457 }
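/* Usage sketch (hypothetical backend, not from this file): a target
   registers itself from its _initialize_* function after filling in a
   statically allocated target_ops:

     static struct target_ops example_ops;

     void
     _initialize_example_target (void)
     {
       example_ops.to_shortname = "example";
       example_ops.to_longname = "Hypothetical example target";
       example_ops.to_doc = "Connect to the example target.";
       example_ops.to_open = example_open;
       example_ops.to_stratum = process_stratum;
       example_ops.to_magic = OPS_MAGIC;
       add_target (&example_ops);
     }

   Here example_open is a hypothetical function.  add_target then
   creates the "target example" subcommand via the add_cmd call above.  */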
458
459 /* See target.h. */
460
461 void
462 add_deprecated_target_alias (struct target_ops *t, char *alias)
463 {
464 struct cmd_list_element *c;
465 char *alt;
466
467 /* If we used add_alias_cmd here, we would not get the deprecated
468 warning; see PR cli/15104. */
469 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
470 alt = xstrprintf ("target %s", t->to_shortname);
471 deprecate_cmd (c, alt);
472 }
473
474 /* Stub functions */
475
476 void
477 target_ignore (void)
478 {
479 }
480
481 void
482 target_kill (void)
483 {
484 struct target_ops *t;
485
486 for (t = current_target.beneath; t != NULL; t = t->beneath)
487 if (t->to_kill != NULL)
488 {
489 if (targetdebug)
490 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
491
492 t->to_kill (t);
493 return;
494 }
495
496 noprocess ();
497 }
498
499 void
500 target_load (char *arg, int from_tty)
501 {
502 target_dcache_invalidate ();
503 (*current_target.to_load) (arg, from_tty);
504 }
505
506 void
507 target_create_inferior (char *exec_file, char *args,
508 char **env, int from_tty)
509 {
510 struct target_ops *t;
511
512 for (t = current_target.beneath; t != NULL; t = t->beneath)
513 {
514 if (t->to_create_inferior != NULL)
515 {
516 t->to_create_inferior (t, exec_file, args, env, from_tty);
517 if (targetdebug)
518 fprintf_unfiltered (gdb_stdlog,
519 "target_create_inferior (%s, %s, xxx, %d)\n",
520 exec_file, args, from_tty);
521 return;
522 }
523 }
524
525 internal_error (__FILE__, __LINE__,
526 _("could not find a target to create inferior"));
527 }
528
529 void
530 target_terminal_inferior (void)
531 {
532 /* A background resume (``run&'') should leave GDB in control of the
533 terminal. Use target_can_async_p, not target_is_async_p, since at
534 this point the target is not async yet. However, if sync_execution
535 is not set, we know it will become async prior to resume. */
536 if (target_can_async_p () && !sync_execution)
537 return;
538
539 /* If GDB is resuming the inferior in the foreground, install
540 inferior's terminal modes. */
541 (*current_target.to_terminal_inferior) ();
542 }
543
544 static int
545 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
546 struct target_ops *t)
547 {
548 errno = EIO; /* Can't read/write this location. */
549 return 0; /* No bytes handled. */
550 }
551
552 static void
553 tcomplain (void)
554 {
555 error (_("You can't do that when your target is `%s'"),
556 current_target.to_shortname);
557 }
558
559 void
560 noprocess (void)
561 {
562 error (_("You can't do that without a process to debug."));
563 }
564
565 static void
566 default_terminal_info (const char *args, int from_tty)
567 {
568 printf_unfiltered (_("No saved terminal information.\n"));
569 }
570
571 /* A default implementation for the to_get_ada_task_ptid target method.
572
573 This function builds the PTID by using both LWP and TID as part of
574 the PTID lwp and tid elements. The pid used is the pid of the
575 inferior_ptid. */
576
577 static ptid_t
578 default_get_ada_task_ptid (long lwp, long tid)
579 {
580 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
581 }
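/* For example (hypothetical numbers): with an inferior pid of 1234, an
   LWP of 5 and a TID of 0, this returns the ptid (1234, 5, 0).  */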
582
583 static enum exec_direction_kind
584 default_execution_direction (void)
585 {
586 if (!target_can_execute_reverse)
587 return EXEC_FORWARD;
588 else if (!target_can_async_p ())
589 return EXEC_FORWARD;
590 else
591 gdb_assert_not_reached ("\
592 to_execution_direction must be implemented for reverse async");
593 }
594
595 /* Go through the target stack from top to bottom, copying over zero
596 entries in current_target, then filling in still empty entries. In
597 effect, we are doing class inheritance through the pushed target
598 vectors.
599
600 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
601 is currently implemented, is that it discards any knowledge of
602 which target an inherited method originally belonged to.
603 Consequently, new target methods should instead explicitly and
604 locally search the target stack for the target that can handle the
605 request. */
606
607 static void
608 update_current_target (void)
609 {
610 struct target_ops *t;
611
612 /* First, reset current's contents. */
613 memset (&current_target, 0, sizeof (current_target));
614
615 #define INHERIT(FIELD, TARGET) \
616 if (!current_target.FIELD) \
617 current_target.FIELD = (TARGET)->FIELD
618
619 for (t = target_stack; t; t = t->beneath)
620 {
621 INHERIT (to_shortname, t);
622 INHERIT (to_longname, t);
623 INHERIT (to_doc, t);
624 /* Do not inherit to_open. */
625 /* Do not inherit to_close. */
626 /* Do not inherit to_attach. */
627 INHERIT (to_post_attach, t);
628 INHERIT (to_attach_no_wait, t);
629 /* Do not inherit to_detach. */
630 /* Do not inherit to_disconnect. */
631 /* Do not inherit to_resume. */
632 /* Do not inherit to_wait. */
633 /* Do not inherit to_fetch_registers. */
634 /* Do not inherit to_store_registers. */
635 INHERIT (to_prepare_to_store, t);
636 INHERIT (deprecated_xfer_memory, t);
637 INHERIT (to_files_info, t);
638 INHERIT (to_insert_breakpoint, t);
639 INHERIT (to_remove_breakpoint, t);
640 INHERIT (to_can_use_hw_breakpoint, t);
641 INHERIT (to_insert_hw_breakpoint, t);
642 INHERIT (to_remove_hw_breakpoint, t);
643 /* Do not inherit to_ranged_break_num_registers. */
644 INHERIT (to_insert_watchpoint, t);
645 INHERIT (to_remove_watchpoint, t);
646 /* Do not inherit to_insert_mask_watchpoint. */
647 /* Do not inherit to_remove_mask_watchpoint. */
648 INHERIT (to_stopped_data_address, t);
649 INHERIT (to_have_steppable_watchpoint, t);
650 INHERIT (to_have_continuable_watchpoint, t);
651 INHERIT (to_stopped_by_watchpoint, t);
652 INHERIT (to_watchpoint_addr_within_range, t);
653 INHERIT (to_region_ok_for_hw_watchpoint, t);
654 INHERIT (to_can_accel_watchpoint_condition, t);
655 /* Do not inherit to_masked_watch_num_registers. */
656 INHERIT (to_terminal_init, t);
657 INHERIT (to_terminal_inferior, t);
658 INHERIT (to_terminal_ours_for_output, t);
659 INHERIT (to_terminal_ours, t);
660 INHERIT (to_terminal_save_ours, t);
661 INHERIT (to_terminal_info, t);
662 /* Do not inherit to_kill. */
663 INHERIT (to_load, t);
664 /* Do not inherit to_create_inferior. */
665 INHERIT (to_post_startup_inferior, t);
666 INHERIT (to_insert_fork_catchpoint, t);
667 INHERIT (to_remove_fork_catchpoint, t);
668 INHERIT (to_insert_vfork_catchpoint, t);
669 INHERIT (to_remove_vfork_catchpoint, t);
670 /* Do not inherit to_follow_fork. */
671 INHERIT (to_insert_exec_catchpoint, t);
672 INHERIT (to_remove_exec_catchpoint, t);
673 INHERIT (to_set_syscall_catchpoint, t);
674 INHERIT (to_has_exited, t);
675 /* Do not inherit to_mourn_inferior. */
676 INHERIT (to_can_run, t);
677 /* Do not inherit to_pass_signals. */
678 /* Do not inherit to_program_signals. */
679 /* Do not inherit to_thread_alive. */
680 /* Do not inherit to_find_new_threads. */
681 /* Do not inherit to_pid_to_str. */
682 INHERIT (to_extra_thread_info, t);
683 INHERIT (to_thread_name, t);
684 INHERIT (to_stop, t);
685 /* Do not inherit to_xfer_partial. */
686 INHERIT (to_rcmd, t);
687 INHERIT (to_pid_to_exec_file, t);
688 INHERIT (to_log_command, t);
689 INHERIT (to_stratum, t);
690 /* Do not inherit to_has_all_memory. */
691 /* Do not inherit to_has_memory. */
692 /* Do not inherit to_has_stack. */
693 /* Do not inherit to_has_registers. */
694 /* Do not inherit to_has_execution. */
695 INHERIT (to_has_thread_control, t);
696 INHERIT (to_can_async_p, t);
697 INHERIT (to_is_async_p, t);
698 INHERIT (to_async, t);
699 INHERIT (to_find_memory_regions, t);
700 INHERIT (to_make_corefile_notes, t);
701 INHERIT (to_get_bookmark, t);
702 INHERIT (to_goto_bookmark, t);
703 /* Do not inherit to_get_thread_local_address. */
704 INHERIT (to_can_execute_reverse, t);
705 INHERIT (to_execution_direction, t);
706 INHERIT (to_thread_architecture, t);
707 /* Do not inherit to_read_description. */
708 INHERIT (to_get_ada_task_ptid, t);
709 /* Do not inherit to_search_memory. */
710 INHERIT (to_supports_multi_process, t);
711 INHERIT (to_supports_enable_disable_tracepoint, t);
712 INHERIT (to_supports_string_tracing, t);
713 INHERIT (to_trace_init, t);
714 INHERIT (to_download_tracepoint, t);
715 INHERIT (to_can_download_tracepoint, t);
716 INHERIT (to_download_trace_state_variable, t);
717 INHERIT (to_enable_tracepoint, t);
718 INHERIT (to_disable_tracepoint, t);
719 INHERIT (to_trace_set_readonly_regions, t);
720 INHERIT (to_trace_start, t);
721 INHERIT (to_get_trace_status, t);
722 INHERIT (to_get_tracepoint_status, t);
723 INHERIT (to_trace_stop, t);
724 INHERIT (to_trace_find, t);
725 INHERIT (to_get_trace_state_variable_value, t);
726 INHERIT (to_save_trace_data, t);
727 INHERIT (to_upload_tracepoints, t);
728 INHERIT (to_upload_trace_state_variables, t);
729 INHERIT (to_get_raw_trace_data, t);
730 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
731 INHERIT (to_set_disconnected_tracing, t);
732 INHERIT (to_set_circular_trace_buffer, t);
733 INHERIT (to_set_trace_buffer_size, t);
734 INHERIT (to_set_trace_notes, t);
735 INHERIT (to_get_tib_address, t);
736 INHERIT (to_set_permissions, t);
737 INHERIT (to_static_tracepoint_marker_at, t);
738 INHERIT (to_static_tracepoint_markers_by_strid, t);
739 INHERIT (to_traceframe_info, t);
740 INHERIT (to_use_agent, t);
741 INHERIT (to_can_use_agent, t);
742 INHERIT (to_augmented_libraries_svr4_read, t);
743 INHERIT (to_magic, t);
744 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
745 INHERIT (to_can_run_breakpoint_commands, t);
746 /* Do not inherit to_memory_map. */
747 /* Do not inherit to_flash_erase. */
748 /* Do not inherit to_flash_done. */
749 }
750 #undef INHERIT
751
752 /* Clean up a target struct so it no longer has any zero pointers in
753 it. Some entries are defaulted to a method that prints an error,
754 others are hard-wired to a standard recursive default. */
755
756 #define de_fault(field, value) \
757 if (!current_target.field) \
758 current_target.field = value
759
760 de_fault (to_open,
761 (void (*) (char *, int))
762 tcomplain);
763 de_fault (to_close,
764 (void (*) (void))
765 target_ignore);
766 de_fault (to_post_attach,
767 (void (*) (int))
768 target_ignore);
769 de_fault (to_prepare_to_store,
770 (void (*) (struct regcache *))
771 noprocess);
772 de_fault (deprecated_xfer_memory,
773 (int (*) (CORE_ADDR, gdb_byte *, int, int,
774 struct mem_attrib *, struct target_ops *))
775 nomemory);
776 de_fault (to_files_info,
777 (void (*) (struct target_ops *))
778 target_ignore);
779 de_fault (to_insert_breakpoint,
780 memory_insert_breakpoint);
781 de_fault (to_remove_breakpoint,
782 memory_remove_breakpoint);
783 de_fault (to_can_use_hw_breakpoint,
784 (int (*) (int, int, int))
785 return_zero);
786 de_fault (to_insert_hw_breakpoint,
787 (int (*) (struct gdbarch *, struct bp_target_info *))
788 return_minus_one);
789 de_fault (to_remove_hw_breakpoint,
790 (int (*) (struct gdbarch *, struct bp_target_info *))
791 return_minus_one);
792 de_fault (to_insert_watchpoint,
793 (int (*) (CORE_ADDR, int, int, struct expression *))
794 return_minus_one);
795 de_fault (to_remove_watchpoint,
796 (int (*) (CORE_ADDR, int, int, struct expression *))
797 return_minus_one);
798 de_fault (to_stopped_by_watchpoint,
799 (int (*) (void))
800 return_zero);
801 de_fault (to_stopped_data_address,
802 (int (*) (struct target_ops *, CORE_ADDR *))
803 return_zero);
804 de_fault (to_watchpoint_addr_within_range,
805 default_watchpoint_addr_within_range);
806 de_fault (to_region_ok_for_hw_watchpoint,
807 default_region_ok_for_hw_watchpoint);
808 de_fault (to_can_accel_watchpoint_condition,
809 (int (*) (CORE_ADDR, int, int, struct expression *))
810 return_zero);
811 de_fault (to_terminal_init,
812 (void (*) (void))
813 target_ignore);
814 de_fault (to_terminal_inferior,
815 (void (*) (void))
816 target_ignore);
817 de_fault (to_terminal_ours_for_output,
818 (void (*) (void))
819 target_ignore);
820 de_fault (to_terminal_ours,
821 (void (*) (void))
822 target_ignore);
823 de_fault (to_terminal_save_ours,
824 (void (*) (void))
825 target_ignore);
826 de_fault (to_terminal_info,
827 default_terminal_info);
828 de_fault (to_load,
829 (void (*) (char *, int))
830 tcomplain);
831 de_fault (to_post_startup_inferior,
832 (void (*) (ptid_t))
833 target_ignore);
834 de_fault (to_insert_fork_catchpoint,
835 (int (*) (int))
836 return_one);
837 de_fault (to_remove_fork_catchpoint,
838 (int (*) (int))
839 return_one);
840 de_fault (to_insert_vfork_catchpoint,
841 (int (*) (int))
842 return_one);
843 de_fault (to_remove_vfork_catchpoint,
844 (int (*) (int))
845 return_one);
846 de_fault (to_insert_exec_catchpoint,
847 (int (*) (int))
848 return_one);
849 de_fault (to_remove_exec_catchpoint,
850 (int (*) (int))
851 return_one);
852 de_fault (to_set_syscall_catchpoint,
853 (int (*) (int, int, int, int, int *))
854 return_one);
855 de_fault (to_has_exited,
856 (int (*) (int, int, int *))
857 return_zero);
858 de_fault (to_can_run,
859 return_zero);
860 de_fault (to_extra_thread_info,
861 (char *(*) (struct thread_info *))
862 return_zero);
863 de_fault (to_thread_name,
864 (char *(*) (struct thread_info *))
865 return_zero);
866 de_fault (to_stop,
867 (void (*) (ptid_t))
868 target_ignore);
869 current_target.to_xfer_partial = current_xfer_partial;
870 de_fault (to_rcmd,
871 (void (*) (char *, struct ui_file *))
872 tcomplain);
873 de_fault (to_pid_to_exec_file,
874 (char *(*) (int))
875 return_zero);
876 de_fault (to_async,
877 (void (*) (void (*) (enum inferior_event_type, void*), void*))
878 tcomplain);
879 de_fault (to_thread_architecture,
880 default_thread_architecture);
881 current_target.to_read_description = NULL;
882 de_fault (to_get_ada_task_ptid,
883 (ptid_t (*) (long, long))
884 default_get_ada_task_ptid);
885 de_fault (to_supports_multi_process,
886 (int (*) (void))
887 return_zero);
888 de_fault (to_supports_enable_disable_tracepoint,
889 (int (*) (void))
890 return_zero);
891 de_fault (to_supports_string_tracing,
892 (int (*) (void))
893 return_zero);
894 de_fault (to_trace_init,
895 (void (*) (void))
896 tcomplain);
897 de_fault (to_download_tracepoint,
898 (void (*) (struct bp_location *))
899 tcomplain);
900 de_fault (to_can_download_tracepoint,
901 (int (*) (void))
902 return_zero);
903 de_fault (to_download_trace_state_variable,
904 (void (*) (struct trace_state_variable *))
905 tcomplain);
906 de_fault (to_enable_tracepoint,
907 (void (*) (struct bp_location *))
908 tcomplain);
909 de_fault (to_disable_tracepoint,
910 (void (*) (struct bp_location *))
911 tcomplain);
912 de_fault (to_trace_set_readonly_regions,
913 (void (*) (void))
914 tcomplain);
915 de_fault (to_trace_start,
916 (void (*) (void))
917 tcomplain);
918 de_fault (to_get_trace_status,
919 (int (*) (struct trace_status *))
920 return_minus_one);
921 de_fault (to_get_tracepoint_status,
922 (void (*) (struct breakpoint *, struct uploaded_tp *))
923 tcomplain);
924 de_fault (to_trace_stop,
925 (void (*) (void))
926 tcomplain);
927 de_fault (to_trace_find,
928 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
929 return_minus_one);
930 de_fault (to_get_trace_state_variable_value,
931 (int (*) (int, LONGEST *))
932 return_zero);
933 de_fault (to_save_trace_data,
934 (int (*) (const char *))
935 tcomplain);
936 de_fault (to_upload_tracepoints,
937 (int (*) (struct uploaded_tp **))
938 return_zero);
939 de_fault (to_upload_trace_state_variables,
940 (int (*) (struct uploaded_tsv **))
941 return_zero);
942 de_fault (to_get_raw_trace_data,
943 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
944 tcomplain);
945 de_fault (to_get_min_fast_tracepoint_insn_len,
946 (int (*) (void))
947 return_minus_one);
948 de_fault (to_set_disconnected_tracing,
949 (void (*) (int))
950 target_ignore);
951 de_fault (to_set_circular_trace_buffer,
952 (void (*) (int))
953 target_ignore);
954 de_fault (to_set_trace_buffer_size,
955 (void (*) (LONGEST))
956 target_ignore);
957 de_fault (to_set_trace_notes,
958 (int (*) (const char *, const char *, const char *))
959 return_zero);
960 de_fault (to_get_tib_address,
961 (int (*) (ptid_t, CORE_ADDR *))
962 tcomplain);
963 de_fault (to_set_permissions,
964 (void (*) (void))
965 target_ignore);
966 de_fault (to_static_tracepoint_marker_at,
967 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
968 return_zero);
969 de_fault (to_static_tracepoint_markers_by_strid,
970 (VEC(static_tracepoint_marker_p) * (*) (const char *))
971 tcomplain);
972 de_fault (to_traceframe_info,
973 (struct traceframe_info * (*) (void))
974 return_zero);
975 de_fault (to_supports_evaluation_of_breakpoint_conditions,
976 (int (*) (void))
977 return_zero);
978 de_fault (to_can_run_breakpoint_commands,
979 (int (*) (void))
980 return_zero);
981 de_fault (to_use_agent,
982 (int (*) (int))
983 tcomplain);
984 de_fault (to_can_use_agent,
985 (int (*) (void))
986 return_zero);
987 de_fault (to_augmented_libraries_svr4_read,
988 (int (*) (void))
989 return_zero);
990 de_fault (to_execution_direction, default_execution_direction);
991
992 #undef de_fault
993
994 /* Finally, position the target-stack beneath the squashed
995 "current_target". That way code looking for a non-inherited
996 target method can quickly and simply find it. */
997 current_target.beneath = target_stack;
998
999 if (targetdebug)
1000 setup_target_debug ();
1001 }
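/* Illustrative note (not part of the original file): for a minimal
   target that only fills in, say, to_shortname, to_stratum,
   to_fetch_registers and to_magic, the code above leaves
   current_target.to_insert_breakpoint defaulting to
   memory_insert_breakpoint, the to_terminal_* hooks defaulting to
   target_ignore, and unimplemented operations such as to_rcmd
   reporting an error through tcomplain.  */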
1002
1003 /* Push a new target type into the stack of the existing target accessors,
1004 possibly superseding some of the existing accessors.
1005
1006 Rather than allow an empty stack, we always have the dummy target at
1007 the bottom stratum, so we can call the function vectors without
1008 checking them. */
1009
1010 void
1011 push_target (struct target_ops *t)
1012 {
1013 struct target_ops **cur;
1014
1015 /* Check magic number. If wrong, it probably means someone changed
1016 the struct definition, but not all the places that initialize one. */
1017 if (t->to_magic != OPS_MAGIC)
1018 {
1019 fprintf_unfiltered (gdb_stderr,
1020 "Magic number of %s target struct wrong\n",
1021 t->to_shortname);
1022 internal_error (__FILE__, __LINE__,
1023 _("failed internal consistency check"));
1024 }
1025
1026 /* Find the proper stratum to install this target in. */
1027 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1028 {
1029 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1030 break;
1031 }
1032
1033 /* If there are already targets at this stratum, remove them. */
1034 /* FIXME: cagney/2003-10-15: I think this should be popping all
1035 targets to CUR, and not just those at this stratum level. */
1036 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1037 {
1038 /* There's already something at this stratum level. Close it,
1039 and un-hook it from the stack. */
1040 struct target_ops *tmp = (*cur);
1041
1042 (*cur) = (*cur)->beneath;
1043 tmp->beneath = NULL;
1044 target_close (tmp);
1045 }
1046
1047 /* We have removed all targets in our stratum, now add the new one. */
1048 t->beneath = (*cur);
1049 (*cur) = t;
1050
1051 update_current_target ();
1052 }
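/* Illustrative example: the strata defined in target.h order the stack
   roughly as dummy_stratum < file_stratum < process_stratum.  Pushing
   the exec target (file_stratum) and then a remote target
   (process_stratum) therefore yields, from top to bottom:

     remote   (process_stratum)
     exec     (file_stratum)
     dummy    (dummy_stratum)

   and pushing a second process_stratum target closes and replaces the
   remote one, as coded in the loop above.  */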
1053
1054 /* Remove a target_ops vector from the stack, wherever it may be.
1055 Return how many times it was removed (0 or 1). */
1056
1057 int
1058 unpush_target (struct target_ops *t)
1059 {
1060 struct target_ops **cur;
1061 struct target_ops *tmp;
1062
1063 if (t->to_stratum == dummy_stratum)
1064 internal_error (__FILE__, __LINE__,
1065 _("Attempt to unpush the dummy target"));
1066
1067 /* Look for the specified target. Note that we assume that a target
1068 can only occur once in the target stack. */
1069
1070 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1071 {
1072 if ((*cur) == t)
1073 break;
1074 }
1075
1076 /* If we don't find target_ops, quit. Only open targets should be
1077 closed. */
1078 if ((*cur) == NULL)
1079 return 0;
1080
1081 /* Unchain the target. */
1082 tmp = (*cur);
1083 (*cur) = (*cur)->beneath;
1084 tmp->beneath = NULL;
1085
1086 update_current_target ();
1087
1088 /* Finally close the target. Note we do this after unchaining, so
1089 any target method calls from within the target_close
1090 implementation don't end up in T anymore. */
1091 target_close (t);
1092
1093 return 1;
1094 }
1095
1096 void
1097 pop_all_targets_above (enum strata above_stratum)
1098 {
1099 while ((int) (current_target.to_stratum) > (int) above_stratum)
1100 {
1101 if (!unpush_target (target_stack))
1102 {
1103 fprintf_unfiltered (gdb_stderr,
1104 "pop_all_targets couldn't find target %s\n",
1105 target_stack->to_shortname);
1106 internal_error (__FILE__, __LINE__,
1107 _("failed internal consistency check"));
1108 break;
1109 }
1110 }
1111 }
1112
1113 void
1114 pop_all_targets (void)
1115 {
1116 pop_all_targets_above (dummy_stratum);
1117 }
1118
1119 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1120
1121 int
1122 target_is_pushed (struct target_ops *t)
1123 {
1124 struct target_ops **cur;
1125
1126 /* Check magic number. If wrong, it probably means someone changed
1127 the struct definition, but not all the places that initialize one. */
1128 if (t->to_magic != OPS_MAGIC)
1129 {
1130 fprintf_unfiltered (gdb_stderr,
1131 "Magic number of %s target struct wrong\n",
1132 t->to_shortname);
1133 internal_error (__FILE__, __LINE__,
1134 _("failed internal consistency check"));
1135 }
1136
1137 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1138 if (*cur == t)
1139 return 1;
1140
1141 return 0;
1142 }
1143
1144 /* Using the objfile specified in OBJFILE, find the address for the
1145 current thread's thread-local storage with offset OFFSET. */
1146 CORE_ADDR
1147 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1148 {
1149 volatile CORE_ADDR addr = 0;
1150 struct target_ops *target;
1151
1152 for (target = current_target.beneath;
1153 target != NULL;
1154 target = target->beneath)
1155 {
1156 if (target->to_get_thread_local_address != NULL)
1157 break;
1158 }
1159
1160 if (target != NULL
1161 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1162 {
1163 ptid_t ptid = inferior_ptid;
1164 volatile struct gdb_exception ex;
1165
1166 TRY_CATCH (ex, RETURN_MASK_ALL)
1167 {
1168 CORE_ADDR lm_addr;
1169
1170 /* Fetch the load module address for this objfile. */
1171 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1172 objfile);
1173 /* If it's 0, throw the appropriate exception. */
1174 if (lm_addr == 0)
1175 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1176 _("TLS load module not found"));
1177
1178 addr = target->to_get_thread_local_address (target, ptid,
1179 lm_addr, offset);
1180 }
1181 /* If an error occurred, print TLS related messages here. Otherwise,
1182 throw the error to some higher catcher. */
1183 if (ex.reason < 0)
1184 {
1185 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1186
1187 switch (ex.error)
1188 {
1189 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1190 error (_("Cannot find thread-local variables "
1191 "in this thread library."));
1192 break;
1193 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1194 if (objfile_is_library)
1195 error (_("Cannot find shared library `%s' in dynamic"
1196 " linker's load module list"), objfile->name);
1197 else
1198 error (_("Cannot find executable file `%s' in dynamic"
1199 " linker's load module list"), objfile->name);
1200 break;
1201 case TLS_NOT_ALLOCATED_YET_ERROR:
1202 if (objfile_is_library)
1203 error (_("The inferior has not yet allocated storage for"
1204 " thread-local variables in\n"
1205 "the shared library `%s'\n"
1206 "for %s"),
1207 objfile->name, target_pid_to_str (ptid));
1208 else
1209 error (_("The inferior has not yet allocated storage for"
1210 " thread-local variables in\n"
1211 "the executable `%s'\n"
1212 "for %s"),
1213 objfile->name, target_pid_to_str (ptid));
1214 break;
1215 case TLS_GENERIC_ERROR:
1216 if (objfile_is_library)
1217 error (_("Cannot find thread-local storage for %s, "
1218 "shared library %s:\n%s"),
1219 target_pid_to_str (ptid),
1220 objfile->name, ex.message);
1221 else
1222 error (_("Cannot find thread-local storage for %s, "
1223 "executable file %s:\n%s"),
1224 target_pid_to_str (ptid),
1225 objfile->name, ex.message);
1226 break;
1227 default:
1228 throw_exception (ex);
1229 break;
1230 }
1231 }
1232 }
1233 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1234 TLS is an ABI-specific thing. But we don't do that yet. */
1235 else
1236 error (_("Cannot find thread-local variables on this target"));
1237
1238 return addr;
1239 }
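/* Usage sketch (hypothetical variables): a caller holding a
   module-relative TLS offset OFFS for a symbol defined in OBJF obtains
   the address in the current thread with

     CORE_ADDR addr = target_translate_tls_address (objf, offs);

   This is how the DWARF location evaluator resolves
   DW_OP_GNU_push_tls_address style references.  */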
1240
1241 #undef MIN
1242 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1243
1244 /* target_read_string -- read a null terminated string, up to LEN bytes,
1245 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1246 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1247 is responsible for freeing it. Return the number of bytes successfully
1248 read. */
1249
1250 int
1251 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1252 {
1253 int tlen, offset, i;
1254 gdb_byte buf[4];
1255 int errcode = 0;
1256 char *buffer;
1257 int buffer_allocated;
1258 char *bufptr;
1259 unsigned int nbytes_read = 0;
1260
1261 gdb_assert (string);
1262
1263 /* Small for testing. */
1264 buffer_allocated = 4;
1265 buffer = xmalloc (buffer_allocated);
1266 bufptr = buffer;
1267
1268 while (len > 0)
1269 {
1270 tlen = MIN (len, 4 - (memaddr & 3));
1271 offset = memaddr & 3;
1272
1273 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1274 if (errcode != 0)
1275 {
1276 /* The transfer request might have crossed the boundary to an
1277 unallocated region of memory. Retry the transfer, requesting
1278 a single byte. */
1279 tlen = 1;
1280 offset = 0;
1281 errcode = target_read_memory (memaddr, buf, 1);
1282 if (errcode != 0)
1283 goto done;
1284 }
1285
1286 if (bufptr - buffer + tlen > buffer_allocated)
1287 {
1288 unsigned int bytes;
1289
1290 bytes = bufptr - buffer;
1291 buffer_allocated *= 2;
1292 buffer = xrealloc (buffer, buffer_allocated);
1293 bufptr = buffer + bytes;
1294 }
1295
1296 for (i = 0; i < tlen; i++)
1297 {
1298 *bufptr++ = buf[i + offset];
1299 if (buf[i + offset] == '\000')
1300 {
1301 nbytes_read += i + 1;
1302 goto done;
1303 }
1304 }
1305
1306 memaddr += tlen;
1307 len -= tlen;
1308 nbytes_read += tlen;
1309 }
1310 done:
1311 *string = buffer;
1312 if (errnop != NULL)
1313 *errnop = errcode;
1314 return nbytes_read;
1315 }
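/* Usage sketch (hypothetical caller): reading a NUL-terminated string
   of at most 256 bytes from the inferior:

     char *str;
     int err;
     int nread = target_read_string (memaddr, &str, 256, &err);

     if (err == 0 && nread > 0)
       fputs_filtered (str, gdb_stdout);
     xfree (str);

   Note that *STRING is set to malloc'd memory even on error, so the
   buffer must be freed in either case.  */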
1316
1317 struct target_section_table *
1318 target_get_section_table (struct target_ops *target)
1319 {
1320 struct target_ops *t;
1321
1322 if (targetdebug)
1323 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1324
1325 for (t = target; t != NULL; t = t->beneath)
1326 if (t->to_get_section_table != NULL)
1327 return (*t->to_get_section_table) (t);
1328
1329 return NULL;
1330 }
1331
1332 /* Find a section containing ADDR. */
1333
1334 struct target_section *
1335 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1336 {
1337 struct target_section_table *table = target_get_section_table (target);
1338 struct target_section *secp;
1339
1340 if (table == NULL)
1341 return NULL;
1342
1343 for (secp = table->sections; secp < table->sections_end; secp++)
1344 {
1345 if (addr >= secp->addr && addr < secp->endaddr)
1346 return secp;
1347 }
1348 return NULL;
1349 }
1350
1351 /* Read memory from the live target, even if currently inspecting a
1352 traceframe. The return is the same as that of target_read. */
1353
1354 static LONGEST
1355 target_read_live_memory (enum target_object object,
1356 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1357 {
1358 int ret;
1359 struct cleanup *cleanup;
1360
1361 /* Switch momentarily out of tfind mode so as to access live memory.
1362 Note that this must not clear global state, such as the frame
1363 cache, which must still remain valid for the previous traceframe.
1364 We may be _building_ the frame cache at this point. */
1365 cleanup = make_cleanup_restore_traceframe_number ();
1366 set_traceframe_number (-1);
1367
1368 ret = target_read (current_target.beneath, object, NULL,
1369 myaddr, memaddr, len);
1370
1371 do_cleanups (cleanup);
1372 return ret;
1373 }
1374
1375 /* Using the set of read-only target sections of OPS, read live
1376 read-only memory. Note that the actual reads start from the
1377 top-most target again.
1378
1379 For interface/parameters/return description see target.h,
1380 to_xfer_partial. */
1381
1382 static LONGEST
1383 memory_xfer_live_readonly_partial (struct target_ops *ops,
1384 enum target_object object,
1385 gdb_byte *readbuf, ULONGEST memaddr,
1386 LONGEST len)
1387 {
1388 struct target_section *secp;
1389 struct target_section_table *table;
1390
1391 secp = target_section_by_addr (ops, memaddr);
1392 if (secp != NULL
1393 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1394 secp->the_bfd_section)
1395 & SEC_READONLY))
1396 {
1397 struct target_section *p;
1398 ULONGEST memend = memaddr + len;
1399
1400 table = target_get_section_table (ops);
1401
1402 for (p = table->sections; p < table->sections_end; p++)
1403 {
1404 if (memaddr >= p->addr)
1405 {
1406 if (memend <= p->endaddr)
1407 {
1408 /* Entire transfer is within this section. */
1409 return target_read_live_memory (object, memaddr,
1410 readbuf, len);
1411 }
1412 else if (memaddr >= p->endaddr)
1413 {
1414 /* This section ends before the transfer starts. */
1415 continue;
1416 }
1417 else
1418 {
1419 /* This section overlaps the transfer. Just do half. */
1420 len = p->endaddr - memaddr;
1421 return target_read_live_memory (object, memaddr,
1422 readbuf, len);
1423 }
1424 }
1425 }
1426 }
1427
1428 return 0;
1429 }
1430
1431 /* Perform a partial memory transfer.
1432 For docs see target.h, to_xfer_partial. */
1433
1434 static LONGEST
1435 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1436 void *readbuf, const void *writebuf, ULONGEST memaddr,
1437 LONGEST len)
1438 {
1439 LONGEST res;
1440 int reg_len;
1441 struct mem_region *region;
1442 struct inferior *inf;
1443
1444 /* For accesses to unmapped overlay sections, read directly from
1445 files. Must do this first, as MEMADDR may need adjustment. */
1446 if (readbuf != NULL && overlay_debugging)
1447 {
1448 struct obj_section *section = find_pc_overlay (memaddr);
1449
1450 if (pc_in_unmapped_range (memaddr, section))
1451 {
1452 struct target_section_table *table
1453 = target_get_section_table (ops);
1454 const char *section_name = section->the_bfd_section->name;
1455
1456 memaddr = overlay_mapped_address (memaddr, section);
1457 return section_table_xfer_memory_partial (readbuf, writebuf,
1458 memaddr, len,
1459 table->sections,
1460 table->sections_end,
1461 section_name);
1462 }
1463 }
1464
1465 /* Try the executable files, if "trust-readonly-sections" is set. */
1466 if (readbuf != NULL && trust_readonly)
1467 {
1468 struct target_section *secp;
1469 struct target_section_table *table;
1470
1471 secp = target_section_by_addr (ops, memaddr);
1472 if (secp != NULL
1473 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1474 secp->the_bfd_section)
1475 & SEC_READONLY))
1476 {
1477 table = target_get_section_table (ops);
1478 return section_table_xfer_memory_partial (readbuf, writebuf,
1479 memaddr, len,
1480 table->sections,
1481 table->sections_end,
1482 NULL);
1483 }
1484 }
1485
1486 /* If reading unavailable memory in the context of traceframes, and
1487 this address falls within a read-only section, fall back to
1488 reading from live memory. */
1489 if (readbuf != NULL && get_traceframe_number () != -1)
1490 {
1491 VEC(mem_range_s) *available;
1492
1493 /* If we fail to get the set of available memory, then the
1494 target does not support querying traceframe info, and so we
1495 attempt reading from the traceframe anyway (assuming the
1496 target implements the old QTro packet then). */
1497 if (traceframe_available_memory (&available, memaddr, len))
1498 {
1499 struct cleanup *old_chain;
1500
1501 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1502
1503 if (VEC_empty (mem_range_s, available)
1504 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1505 {
1506 /* Don't read into the traceframe's available
1507 memory. */
1508 if (!VEC_empty (mem_range_s, available))
1509 {
1510 LONGEST oldlen = len;
1511
1512 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1513 gdb_assert (len <= oldlen);
1514 }
1515
1516 do_cleanups (old_chain);
1517
1518 /* This goes through the topmost target again. */
1519 res = memory_xfer_live_readonly_partial (ops, object,
1520 readbuf, memaddr, len);
1521 if (res > 0)
1522 return res;
1523
1524 /* No use trying further, we know some memory starting
1525 at MEMADDR isn't available. */
1526 return -1;
1527 }
1528
1529 /* Don't try to read more than how much is available, in
1530 case the target implements the deprecated QTro packet to
1531 cater for older GDBs (the target's knowledge of read-only
1532 sections may be outdated by now). */
1533 len = VEC_index (mem_range_s, available, 0)->length;
1534
1535 do_cleanups (old_chain);
1536 }
1537 }
1538
1539 /* Try GDB's internal data cache. */
1540 region = lookup_mem_region (memaddr);
1541 /* region->hi == 0 means there's no upper bound. */
1542 if (memaddr + len < region->hi || region->hi == 0)
1543 reg_len = len;
1544 else
1545 reg_len = region->hi - memaddr;
1546
1547 switch (region->attrib.mode)
1548 {
1549 case MEM_RO:
1550 if (writebuf != NULL)
1551 return -1;
1552 break;
1553
1554 case MEM_WO:
1555 if (readbuf != NULL)
1556 return -1;
1557 break;
1558
1559 case MEM_FLASH:
1560 /* We only support writing to flash during "load" for now. */
1561 if (writebuf != NULL)
1562 error (_("Writing to flash memory forbidden in this context"));
1563 break;
1564
1565 case MEM_NONE:
1566 return -1;
1567 }
1568
1569 if (!ptid_equal (inferior_ptid, null_ptid))
1570 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1571 else
1572 inf = NULL;
1573
1574 if (inf != NULL
1575 /* The dcache reads whole cache lines; that doesn't play well
1576 with reading from a trace buffer, because reading outside of
1577 the collected memory range fails. */
1578 && get_traceframe_number () == -1
1579 && (region->attrib.cache
1580 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1581 {
1582 if (readbuf != NULL)
1583 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1584 reg_len, 0);
1585 else
1586 /* FIXME drow/2006-08-09: If we're going to preserve const
1587 correctness dcache_xfer_memory should take readbuf and
1588 writebuf. */
1589 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1590 (void *) writebuf,
1591 reg_len, 1);
1592 if (res <= 0)
1593 return -1;
1594 else
1595 return res;
1596 }
1597
1598 /* If none of those methods found the memory we wanted, fall back
1599 to a target partial transfer. Normally a single call to
1600 to_xfer_partial is enough; if it doesn't recognize an object
1601 it will call the to_xfer_partial of the next target down.
1602 But for memory this won't do. Memory is the only target
1603 object which can be read from more than one valid target.
1604 A core file, for instance, could have some of memory but
1605 delegate other bits to the target below it. So, we must
1606 manually try all targets. */
1607
1608 do
1609 {
1610 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1611 readbuf, writebuf, memaddr, reg_len);
1612 if (res > 0)
1613 break;
1614
1615 /* We want to continue past core files to executables, but not
1616 past a running target's memory. */
1617 if (ops->to_has_all_memory (ops))
1618 break;
1619
1620 ops = ops->beneath;
1621 }
1622 while (ops != NULL);
1623
1624 /* Make sure the cache gets updated no matter what, in case we are writing
1625 to the stack. Even if this write is not tagged as such, we still need
1626 to update the cache. */
1627
1628 if (res > 0
1629 && inf != NULL
1630 && writebuf != NULL
1631 && !region->attrib.cache
1632 && stack_cache_enabled_p
1633 && object != TARGET_OBJECT_STACK_MEMORY)
1634 {
1635 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1636 }
1637
1638 /* If we still haven't got anything, return the last error. We
1639 give up. */
1640 return res;
1641 }
1642
1643 /* Perform a partial memory transfer. For docs see target.h,
1644 to_xfer_partial. */
1645
1646 static LONGEST
1647 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1648 void *readbuf, const void *writebuf, ULONGEST memaddr,
1649 LONGEST len)
1650 {
1651 int res;
1652
1653 /* Zero length requests are ok and require no work. */
1654 if (len == 0)
1655 return 0;
1656
1657 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1658 breakpoint insns, thus hiding out from higher layers whether
1659 there are software breakpoints inserted in the code stream. */
1660 if (readbuf != NULL)
1661 {
1662 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1663
1664 if (res > 0 && !show_memory_breakpoints)
1665 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1666 }
1667 else
1668 {
1669 void *buf;
1670 struct cleanup *old_chain;
1671
1672 buf = xmalloc (len);
1673 old_chain = make_cleanup (xfree, buf);
1674 memcpy (buf, writebuf, len);
1675
1676 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1677 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1678
1679 do_cleanups (old_chain);
1680 }
1681
1682 return res;
1683 }
1684
1685 static void
1686 restore_show_memory_breakpoints (void *arg)
1687 {
1688 show_memory_breakpoints = (uintptr_t) arg;
1689 }
1690
1691 struct cleanup *
1692 make_show_memory_breakpoints_cleanup (int show)
1693 {
1694 int current = show_memory_breakpoints;
1695
1696 show_memory_breakpoints = show;
1697 return make_cleanup (restore_show_memory_breakpoints,
1698 (void *) (uintptr_t) current);
1699 }
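/* Usage sketch (assumption): code that needs to read memory exactly as
   it is in the inferior, including any breakpoint instructions GDB has
   inserted, wraps the access like this:

     struct cleanup *old = make_show_memory_breakpoints_cleanup (1);

     target_read_memory (addr, buf, sizeof (buf));
     do_cleanups (old);

   Passing 0 restores the default behaviour, in which reads see the
   original program bytes with software breakpoints hidden (see
   memory_xfer_partial above).  */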
1700
1701 /* For docs see target.h, to_xfer_partial. */
1702
1703 static LONGEST
1704 target_xfer_partial (struct target_ops *ops,
1705 enum target_object object, const char *annex,
1706 void *readbuf, const void *writebuf,
1707 ULONGEST offset, LONGEST len)
1708 {
1709 LONGEST retval;
1710
1711 gdb_assert (ops->to_xfer_partial != NULL);
1712
1713 if (writebuf && !may_write_memory)
1714 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1715 core_addr_to_string_nz (offset), plongest (len));
1716
1717 /* If this is a memory transfer, let the memory-specific code
1718 have a look at it instead. Memory transfers are more
1719 complicated. */
1720 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1721 retval = memory_xfer_partial (ops, object, readbuf,
1722 writebuf, offset, len);
1723 else
1724 {
1725 enum target_object raw_object = object;
1726
1727 /* If this is a raw memory transfer, request the normal
1728 memory object from other layers. */
1729 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1730 raw_object = TARGET_OBJECT_MEMORY;
1731
1732 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1733 writebuf, offset, len);
1734 }
1735
1736 if (targetdebug)
1737 {
1738 const unsigned char *myaddr = NULL;
1739
1740 fprintf_unfiltered (gdb_stdlog,
1741 "%s:target_xfer_partial "
1742 "(%d, %s, %s, %s, %s, %s) = %s",
1743 ops->to_shortname,
1744 (int) object,
1745 (annex ? annex : "(null)"),
1746 host_address_to_string (readbuf),
1747 host_address_to_string (writebuf),
1748 core_addr_to_string_nz (offset),
1749 plongest (len), plongest (retval));
1750
1751 if (readbuf)
1752 myaddr = readbuf;
1753 if (writebuf)
1754 myaddr = writebuf;
1755 if (retval > 0 && myaddr != NULL)
1756 {
1757 int i;
1758
1759 fputs_unfiltered (", bytes =", gdb_stdlog);
1760 for (i = 0; i < retval; i++)
1761 {
1762 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1763 {
1764 if (targetdebug < 2 && i > 0)
1765 {
1766 fprintf_unfiltered (gdb_stdlog, " ...");
1767 break;
1768 }
1769 fprintf_unfiltered (gdb_stdlog, "\n");
1770 }
1771
1772 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1773 }
1774 }
1775
1776 fputc_unfiltered ('\n', gdb_stdlog);
1777 }
1778 return retval;
1779 }
1780
1781 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1782 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1783 if any error occurs.
1784
1785 If an error occurs, no guarantee is made about the contents of the data at
1786 MYADDR. In particular, the caller should not depend upon partial reads
1787 filling the buffer with good data. There is no way for the caller to know
1788 how much good data might have been transferred anyway. Callers that can
1789 deal with partial reads should call target_read (which will retry until
1790 it makes no progress, and then return how much was transferred). */
1791
1792 int
1793 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1794 {
1795 /* Dispatch to the topmost target, not the flattened current_target.
1796 Memory accesses check target->to_has_(all_)memory, and the
1797 flattened target doesn't inherit those. */
1798 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1799 myaddr, memaddr, len) == len)
1800 return 0;
1801 else
1802 return EIO;
1803 }
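/* Usage sketch (hypothetical variables): reading a 4-byte word and
   checking the errno-style return value:

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof (buf)) != 0)
       error (_("Cannot read memory at %s"), paddress (gdbarch, addr));

   target_write_memory below follows the same convention: 0 on success,
   an errno value on failure.  */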
1804
1805 /* Like target_read_memory, but specify explicitly that this is a read from
1806 the target's stack. This may trigger different cache behavior. */
1807
1808 int
1809 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1810 {
1811 /* Dispatch to the topmost target, not the flattened current_target.
1812 Memory accesses check target->to_has_(all_)memory, and the
1813 flattened target doesn't inherit those. */
1814
1815 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1816 myaddr, memaddr, len) == len)
1817 return 0;
1818 else
1819 return EIO;
1820 }
1821
1822 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1823 Returns either 0 for success or an errno value if any error occurs.
1824 If an error occurs, no guarantee is made about how much data got written.
1825 Callers that can deal with partial writes should call target_write. */
1826
1827 int
1828 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1829 {
1830 /* Dispatch to the topmost target, not the flattened current_target.
1831 Memory accesses check target->to_has_(all_)memory, and the
1832 flattened target doesn't inherit those. */
1833 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1834 myaddr, memaddr, len) == len)
1835 return 0;
1836 else
1837 return EIO;
1838 }
1839
1840 /* Write LEN bytes from MYADDR to target raw memory at address
1841 MEMADDR. Returns either 0 for success or an errno value if any
1842 error occurs. If an error occurs, no guarantee is made about how
1843 much data got written. Callers that can deal with partial writes
1844 should call target_write. */
1845
1846 int
1847 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1848 {
1849 /* Dispatch to the topmost target, not the flattened current_target.
1850 Memory accesses check target->to_has_(all_)memory, and the
1851 flattened target doesn't inherit those. */
1852 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1853 myaddr, memaddr, len) == len)
1854 return 0;
1855 else
1856 return EIO;
1857 }
1858
1859 /* Fetch the target's memory map. */
1860
1861 VEC(mem_region_s) *
1862 target_memory_map (void)
1863 {
1864 VEC(mem_region_s) *result;
1865 struct mem_region *last_one, *this_one;
1866 int ix;
1867 struct target_ops *t;
1868
1869 if (targetdebug)
1870 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1871
1872 for (t = current_target.beneath; t != NULL; t = t->beneath)
1873 if (t->to_memory_map != NULL)
1874 break;
1875
1876 if (t == NULL)
1877 return NULL;
1878
1879 result = t->to_memory_map (t);
1880 if (result == NULL)
1881 return NULL;
1882
1883 qsort (VEC_address (mem_region_s, result),
1884 VEC_length (mem_region_s, result),
1885 sizeof (struct mem_region), mem_region_cmp);
1886
1887 /* Check that regions do not overlap. Simultaneously assign
1888 a numbering for the "mem" commands to use to refer to
1889 each region. */
1890 last_one = NULL;
1891 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1892 {
1893 this_one->number = ix;
1894
1895 if (last_one && last_one->hi > this_one->lo)
1896 {
1897 warning (_("Overlapping regions in memory map: ignoring"));
1898 VEC_free (mem_region_s, result);
1899 return NULL;
1900 }
1901 last_one = this_one;
1902 }
1903
1904 return result;
1905 }
1906
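/* Illustrative sketch (editorial addition, not part of GDB): walking the
   vector returned by target_memory_map.  The real consumers are the
   "mem" commands; the printing here is only for demonstration.  */
static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    printf_unfiltered ("region %d: [%s, %s)\n",
		       r->number, hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}
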
1907 void
1908 target_flash_erase (ULONGEST address, LONGEST length)
1909 {
1910 struct target_ops *t;
1911
1912 for (t = current_target.beneath; t != NULL; t = t->beneath)
1913 if (t->to_flash_erase != NULL)
1914 {
1915 if (targetdebug)
1916 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1917 hex_string (address), phex (length, 0));
1918 t->to_flash_erase (t, address, length);
1919 return;
1920 }
1921
1922 tcomplain ();
1923 }
1924
1925 void
1926 target_flash_done (void)
1927 {
1928 struct target_ops *t;
1929
1930 for (t = current_target.beneath; t != NULL; t = t->beneath)
1931 if (t->to_flash_done != NULL)
1932 {
1933 if (targetdebug)
1934 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1935 t->to_flash_done (t);
1936 return;
1937 }
1938
1939 tcomplain ();
1940 }
1941
1942 static void
1943 show_trust_readonly (struct ui_file *file, int from_tty,
1944 struct cmd_list_element *c, const char *value)
1945 {
1946 fprintf_filtered (file,
1947 _("Mode for reading from readonly sections is %s.\n"),
1948 value);
1949 }
1950
1951 /* More generic transfers. */
1952
1953 static LONGEST
1954 default_xfer_partial (struct target_ops *ops, enum target_object object,
1955 const char *annex, gdb_byte *readbuf,
1956 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1957 {
1958 if (object == TARGET_OBJECT_MEMORY
1959 && ops->deprecated_xfer_memory != NULL)
1960 /* If available, fall back to the target's
1961 "deprecated_xfer_memory" method. */
1962 {
1963 int xfered = -1;
1964
1965 errno = 0;
1966 if (writebuf != NULL)
1967 {
1968 void *buffer = xmalloc (len);
1969 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1970
1971 memcpy (buffer, writebuf, len);
1972 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1973 1/*write*/, NULL, ops);
1974 do_cleanups (cleanup);
1975 }
1976 if (readbuf != NULL)
1977 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1978 0/*read*/, NULL, ops);
1979 if (xfered > 0)
1980 return xfered;
1981 else if (xfered == 0 && errno == 0)
1982 /* "deprecated_xfer_memory" uses 0, cross checked against
1983 ERRNO as one indication of an error. */
1984 return 0;
1985 else
1986 return -1;
1987 }
1988 else if (ops->beneath != NULL)
1989 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1990 readbuf, writebuf, offset, len);
1991 else
1992 return -1;
1993 }
1994
1995 /* The xfer_partial handler for the topmost target. Unlike the default,
1996 it does not need to handle memory specially; it just passes all
1997 requests down the stack. */
1998
1999 static LONGEST
2000 current_xfer_partial (struct target_ops *ops, enum target_object object,
2001 const char *annex, gdb_byte *readbuf,
2002 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
2003 {
2004 if (ops->beneath != NULL)
2005 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2006 readbuf, writebuf, offset, len);
2007 else
2008 return -1;
2009 }
2010
2011 /* Target vector read/write partial wrapper functions. */
2012
2013 static LONGEST
2014 target_read_partial (struct target_ops *ops,
2015 enum target_object object,
2016 const char *annex, gdb_byte *buf,
2017 ULONGEST offset, LONGEST len)
2018 {
2019 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2020 }
2021
2022 static LONGEST
2023 target_write_partial (struct target_ops *ops,
2024 enum target_object object,
2025 const char *annex, const gdb_byte *buf,
2026 ULONGEST offset, LONGEST len)
2027 {
2028 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2029 }
2030
2031 /* Wrappers to perform the full transfer. */
2032
2033 /* For docs on target_read see target.h. */
2034
2035 LONGEST
2036 target_read (struct target_ops *ops,
2037 enum target_object object,
2038 const char *annex, gdb_byte *buf,
2039 ULONGEST offset, LONGEST len)
2040 {
2041 LONGEST xfered = 0;
2042
2043 while (xfered < len)
2044 {
2045 LONGEST xfer = target_read_partial (ops, object, annex,
2046 (gdb_byte *) buf + xfered,
2047 offset + xfered, len - xfered);
2048
2049 /* Call an observer, notifying them of the xfer progress? */
2050 if (xfer == 0)
2051 return xfered;
2052 if (xfer < 0)
2053 return -1;
2054 xfered += xfer;
2055 QUIT;
2056 }
2057 return len;
2058 }
2059
2060 /* Assuming that the entire [begin, end) range of memory cannot be
2061 read, try to read whatever subrange is possible to read.
2062
2063 The function returns, in RESULT, either zero or one memory block.
2064 If there's a readable subrange at the beginning, it is completely
2065 read and returned. Any further readable subrange will not be read.
2066 Otherwise, if there's a readable subrange at the end, it will be
2067 completely read and returned. Any readable subranges before it
2068 (which obviously do not start at the beginning) are ignored. In all
2069 other cases -- either no readable subrange at all, or readable
2070 subrange(s) neither at the beginning nor at the end -- nothing is returned.
2071
2072 The purpose of this function is to handle a read across a boundary
2073 of accessible memory in a case when a memory map is not available.
2074 The above restrictions are fine for this case, but will give
2075 incorrect results if the memory is 'patchy'. However, supporting
2076 'patchy' memory would require trying to read every single byte,
2077 and that seems an unacceptable solution. An explicit memory map is
2078 recommended for this case -- read_memory_robust will then take care
2079 of reading multiple ranges. */
2080
2081 static void
2082 read_whatever_is_readable (struct target_ops *ops,
2083 ULONGEST begin, ULONGEST end,
2084 VEC(memory_read_result_s) **result)
2085 {
2086 gdb_byte *buf = xmalloc (end - begin);
2087 ULONGEST current_begin = begin;
2088 ULONGEST current_end = end;
2089 int forward;
2090 memory_read_result_s r;
2091
2092 /* If we previously failed to read 1 byte, nothing can be done here. */
2093 if (end - begin <= 1)
2094 {
2095 xfree (buf);
2096 return;
2097 }
2098
2099 /* Check that either the first or the last byte is readable, and give up
2100 if not. This heuristic is meant to permit reading accessible memory
2101 at the boundary of an accessible region. */
2102 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2103 buf, begin, 1) == 1)
2104 {
2105 forward = 1;
2106 ++current_begin;
2107 }
2108 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2109 buf + (end-begin) - 1, end - 1, 1) == 1)
2110 {
2111 forward = 0;
2112 --current_end;
2113 }
2114 else
2115 {
2116 xfree (buf);
2117 return;
2118 }
2119
2120 /* The loop invariant is that the range [current_begin, current_end) was
2121 previously found to be not readable as a whole.
2122
2123 Note the loop condition -- if the range has 1 byte, we can't divide it
2124 any further, so there's no point trying. */
2125 while (current_end - current_begin > 1)
2126 {
2127 ULONGEST first_half_begin, first_half_end;
2128 ULONGEST second_half_begin, second_half_end;
2129 LONGEST xfer;
2130 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2131
2132 if (forward)
2133 {
2134 first_half_begin = current_begin;
2135 first_half_end = middle;
2136 second_half_begin = middle;
2137 second_half_end = current_end;
2138 }
2139 else
2140 {
2141 first_half_begin = middle;
2142 first_half_end = current_end;
2143 second_half_begin = current_begin;
2144 second_half_end = middle;
2145 }
2146
2147 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2148 buf + (first_half_begin - begin),
2149 first_half_begin,
2150 first_half_end - first_half_begin);
2151
2152 if (xfer == first_half_end - first_half_begin)
2153 {
2154 /* This half reads fine, so the error must be in the
2155 other half. */
2156 current_begin = second_half_begin;
2157 current_end = second_half_end;
2158 }
2159 else
2160 {
2161 /* This half is not readable. Because we've tried one byte, we
2162 know some part of this half is actually readable. Go to the next
2163 iteration to divide again and try to read.
2164
2165 We don't handle the other half, because this function only tries
2166 to read a single readable subrange. */
2167 current_begin = first_half_begin;
2168 current_end = first_half_end;
2169 }
2170 }
2171
2172 if (forward)
2173 {
2174 /* The [begin, current_begin) range has been read. */
2175 r.begin = begin;
2176 r.end = current_begin;
2177 r.data = buf;
2178 }
2179 else
2180 {
2181 /* The [current_end, end) range has been read. */
2182 LONGEST rlen = end - current_end;
2183
2184 r.data = xmalloc (rlen);
2185 memcpy (r.data, buf + current_end - begin, rlen);
2186 r.begin = current_end;
2187 r.end = end;
2188 xfree (buf);
2189 }
2190 VEC_safe_push (memory_read_result_s, (*result), &r);
2191 }
2192
2193 void
2194 free_memory_read_result_vector (void *x)
2195 {
2196 VEC(memory_read_result_s) *v = x;
2197 memory_read_result_s *current;
2198 int ix;
2199
2200 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2201 {
2202 xfree (current->data);
2203 }
2204 VEC_free (memory_read_result_s, v);
2205 }
2206
2207 VEC(memory_read_result_s) *
2208 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2209 {
2210 VEC(memory_read_result_s) *result = 0;
2211
2212 LONGEST xfered = 0;
2213 while (xfered < len)
2214 {
2215 struct mem_region *region = lookup_mem_region (offset + xfered);
2216 LONGEST rlen;
2217
2218 /* If there is no explicit region, a fake one should be created. */
2219 gdb_assert (region);
2220
2221 if (region->hi == 0)
2222 rlen = len - xfered;
2223 else
2224 rlen = region->hi - offset;
2225
2226 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2227 {
2228 /* Cannot read this region. Note that we can end up here only
2229 if the region is explicitly marked inaccessible, or
2230 'inaccessible-by-default' is in effect. */
2231 xfered += rlen;
2232 }
2233 else
2234 {
2235 LONGEST to_read = min (len - xfered, rlen);
2236 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2237
2238 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2239 (gdb_byte *) buffer,
2240 offset + xfered, to_read);
2241 /* Call an observer, notifying them of the xfer progress? */
2242 if (xfer <= 0)
2243 {
2244 /* Got an error reading full chunk. See if maybe we can read
2245 some subrange. */
2246 xfree (buffer);
2247 read_whatever_is_readable (ops, offset + xfered,
2248 offset + xfered + to_read, &result);
2249 xfered += to_read;
2250 }
2251 else
2252 {
2253 struct memory_read_result r;
2254 r.data = buffer;
2255 r.begin = offset + xfered;
2256 r.end = r.begin + xfer;
2257 VEC_safe_push (memory_read_result_s, result, &r);
2258 xfered += xfer;
2259 }
2260 QUIT;
2261 }
2262 }
2263 return result;
2264 }
2265
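/* Illustrative sketch (editorial addition, not part of GDB): reading a
   possibly partially-accessible range with read_memory_robust and
   releasing the result with free_memory_read_result_vector.  ADDR and
   LEN are hypothetical.  */
static void
example_robust_read (struct target_ops *ops, ULONGEST addr, LONGEST len)
{
  VEC(memory_read_result_s) *chunks = read_memory_robust (ops, addr, len);
  struct cleanup *cleanup
    = make_cleanup (free_memory_read_result_vector, chunks);
  memory_read_result_s *chunk;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, chunk); ix++)
    printf_unfiltered ("read [%s, %s)\n",
		       hex_string (chunk->begin), hex_string (chunk->end));

  do_cleanups (cleanup);
}
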
2266
2267 /* An alternative to target_write with progress callbacks. */
2268
2269 LONGEST
2270 target_write_with_progress (struct target_ops *ops,
2271 enum target_object object,
2272 const char *annex, const gdb_byte *buf,
2273 ULONGEST offset, LONGEST len,
2274 void (*progress) (ULONGEST, void *), void *baton)
2275 {
2276 LONGEST xfered = 0;
2277
2278 /* Give the progress callback a chance to set up. */
2279 if (progress)
2280 (*progress) (0, baton);
2281
2282 while (xfered < len)
2283 {
2284 LONGEST xfer = target_write_partial (ops, object, annex,
2285 (gdb_byte *) buf + xfered,
2286 offset + xfered, len - xfered);
2287
2288 if (xfer == 0)
2289 return xfered;
2290 if (xfer < 0)
2291 return -1;
2292
2293 if (progress)
2294 (*progress) (xfer, baton);
2295
2296 xfered += xfer;
2297 QUIT;
2298 }
2299 return len;
2300 }
2301
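/* Illustrative sketch (editorial addition, not part of GDB): a trivial
   progress callback suitable for target_write_with_progress.  The baton
   is assumed to point at a running byte total; real callers such as the
   "load" code path keep richer state.  */
static void
example_write_progress (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
  printf_unfiltered ("wrote %s bytes so far\n", pulongest (*total));
}
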
2302 /* For docs on target_write see target.h. */
2303
2304 LONGEST
2305 target_write (struct target_ops *ops,
2306 enum target_object object,
2307 const char *annex, const gdb_byte *buf,
2308 ULONGEST offset, LONGEST len)
2309 {
2310 return target_write_with_progress (ops, object, annex, buf, offset, len,
2311 NULL, NULL);
2312 }
2313
2314 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2315 the size of the transferred data. PADDING additional bytes are
2316 available in *BUF_P. This is a helper function for
2317 target_read_alloc; see the declaration of that function for more
2318 information. */
2319
2320 static LONGEST
2321 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2322 const char *annex, gdb_byte **buf_p, int padding)
2323 {
2324 size_t buf_alloc, buf_pos;
2325 gdb_byte *buf;
2326 LONGEST n;
2327
2328 /* This function does not have a length parameter; it reads the
2329 entire OBJECT. Also, it doesn't support objects fetched partly
2330 from one target and partly from another (in a different stratum,
2331 e.g. a core file and an executable). Both reasons make it
2332 unsuitable for reading memory. */
2333 gdb_assert (object != TARGET_OBJECT_MEMORY);
2334
2335 /* Start by reading up to 4K at a time. The target will throttle
2336 this number down if necessary. */
2337 buf_alloc = 4096;
2338 buf = xmalloc (buf_alloc);
2339 buf_pos = 0;
2340 while (1)
2341 {
2342 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2343 buf_pos, buf_alloc - buf_pos - padding);
2344 if (n < 0)
2345 {
2346 /* An error occurred. */
2347 xfree (buf);
2348 return -1;
2349 }
2350 else if (n == 0)
2351 {
2352 /* Read all there was. */
2353 if (buf_pos == 0)
2354 xfree (buf);
2355 else
2356 *buf_p = buf;
2357 return buf_pos;
2358 }
2359
2360 buf_pos += n;
2361
2362 /* If the buffer is filling up, expand it. */
2363 if (buf_alloc < buf_pos * 2)
2364 {
2365 buf_alloc *= 2;
2366 buf = xrealloc (buf, buf_alloc);
2367 }
2368
2369 QUIT;
2370 }
2371 }
2372
2373 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2374 the size of the transferred data. See the declaration in "target.h"
2375 for more information about the return value. */
2376
2377 LONGEST
2378 target_read_alloc (struct target_ops *ops, enum target_object object,
2379 const char *annex, gdb_byte **buf_p)
2380 {
2381 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2382 }
2383
2384 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2385 returned as a string, allocated using xmalloc. If an error occurs
2386 or the transfer is unsupported, NULL is returned. Empty objects
2387 are returned as allocated but empty strings. A warning is issued
2388 if the result contains any embedded NUL bytes. */
2389
2390 char *
2391 target_read_stralloc (struct target_ops *ops, enum target_object object,
2392 const char *annex)
2393 {
2394 gdb_byte *buffer;
2395 char *bufstr;
2396 LONGEST i, transferred;
2397
2398 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2399 bufstr = (char *) buffer;
2400
2401 if (transferred < 0)
2402 return NULL;
2403
2404 if (transferred == 0)
2405 return xstrdup ("");
2406
2407 bufstr[transferred] = 0;
2408
2409 /* Check for embedded NUL bytes; but allow trailing NULs. */
2410 for (i = strlen (bufstr); i < transferred; i++)
2411 if (bufstr[i] != 0)
2412 {
2413 warning (_("target object %d, annex %s, "
2414 "contained unexpected null characters"),
2415 (int) object, annex ? annex : "(none)");
2416 break;
2417 }
2418
2419 return bufstr;
2420 }
2421
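/* Illustrative sketch (editorial addition, not part of GDB): fetching an
   entire string object with target_read_stralloc and releasing it with a
   cleanup.  TARGET_OBJECT_OSDATA with the "processes" annex is only an
   example; which annexes exist depends on the target.  */
static void
example_read_osdata_processes (void)
{
  char *text = target_read_stralloc (&current_target, TARGET_OBJECT_OSDATA,
				     "processes");

  if (text != NULL)
    {
      struct cleanup *cleanup = make_cleanup (xfree, text);

      printf_unfiltered ("%s", text);
      do_cleanups (cleanup);
    }
}
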
2422 /* Memory transfer methods. */
2423
2424 void
2425 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2426 LONGEST len)
2427 {
2428 /* This method is used to read from an alternate, non-current
2429 target. This read must bypass the overlay support (as symbols
2430 don't match this target), and GDB's internal cache (wrong cache
2431 for this target). */
2432 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2433 != len)
2434 memory_error (EIO, addr);
2435 }
2436
2437 ULONGEST
2438 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2439 int len, enum bfd_endian byte_order)
2440 {
2441 gdb_byte buf[sizeof (ULONGEST)];
2442
2443 gdb_assert (len <= sizeof (buf));
2444 get_target_memory (ops, addr, buf, len);
2445 return extract_unsigned_integer (buf, len, byte_order);
2446 }
2447
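/* Illustrative sketch (editorial addition, not part of GDB): reading one
   pointer-sized unsigned value from an alternate target, bypassing GDB's
   caches and overlay handling as described above.  */
static ULONGEST
example_read_pointer (struct target_ops *ops, CORE_ADDR addr)
{
  struct gdbarch *gdbarch = target_gdbarch ();
  int ptr_len = gdbarch_ptr_bit (gdbarch) / TARGET_CHAR_BIT;

  return get_target_memory_unsigned (ops, addr, ptr_len,
				     gdbarch_byte_order (gdbarch));
}
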
2448 int
2449 target_insert_breakpoint (struct gdbarch *gdbarch,
2450 struct bp_target_info *bp_tgt)
2451 {
2452 if (!may_insert_breakpoints)
2453 {
2454 warning (_("May not insert breakpoints"));
2455 return 1;
2456 }
2457
2458 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2459 }
2460
2461 int
2462 target_remove_breakpoint (struct gdbarch *gdbarch,
2463 struct bp_target_info *bp_tgt)
2464 {
2465 /* This is kind of a weird case to handle, but the permission might
2466 have been changed after breakpoints were inserted - in which case
2467 we should just take the user literally and assume that any
2468 breakpoints should be left in place. */
2469 if (!may_insert_breakpoints)
2470 {
2471 warning (_("May not remove breakpoints"));
2472 return 1;
2473 }
2474
2475 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2476 }
2477
2478 static void
2479 target_info (char *args, int from_tty)
2480 {
2481 struct target_ops *t;
2482 int has_all_mem = 0;
2483
2484 if (symfile_objfile != NULL)
2485 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2486
2487 for (t = target_stack; t != NULL; t = t->beneath)
2488 {
2489 if (!(*t->to_has_memory) (t))
2490 continue;
2491
2492 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2493 continue;
2494 if (has_all_mem)
2495 printf_unfiltered (_("\tWhile running this, "
2496 "GDB does not access memory from...\n"));
2497 printf_unfiltered ("%s:\n", t->to_longname);
2498 (t->to_files_info) (t);
2499 has_all_mem = (*t->to_has_all_memory) (t);
2500 }
2501 }
2502
2503 /* This function is called before any new inferior is created, e.g.
2504 by running a program, attaching, or connecting to a target.
2505 It cleans up any state from previous invocations which might
2506 change between runs. This is a subset of what target_preopen
2507 resets (things which might change between targets). */
2508
2509 void
2510 target_pre_inferior (int from_tty)
2511 {
2512 /* Clear out solib state. Otherwise the solib state of the previous
2513 inferior might have survived and is entirely wrong for the new
2514 target. This has been observed on GNU/Linux using glibc 2.3. How
2515 to reproduce:
2516
2517 bash$ ./foo&
2518 [1] 4711
2519 bash$ ./foo&
2520 [2] 4712
2521 bash$ gdb ./foo
2522 [...]
2523 (gdb) attach 4711
2524 (gdb) detach
2525 (gdb) attach 4712
2526 Cannot access memory at address 0xdeadbeef
2527 */
2528
2529 /* In some OSs, the shared library list is the same/global/shared
2530 across inferiors. If code is shared between processes, so are
2531 memory regions and features. */
2532 if (!gdbarch_has_global_solist (target_gdbarch ()))
2533 {
2534 no_shared_libraries (NULL, from_tty);
2535
2536 invalidate_target_mem_regions ();
2537
2538 target_clear_description ();
2539 }
2540
2541 agent_capability_invalidate ();
2542 }
2543
2544 /* Callback for iterate_over_inferiors. Gets rid of the given
2545 inferior. */
2546
2547 static int
2548 dispose_inferior (struct inferior *inf, void *args)
2549 {
2550 struct thread_info *thread;
2551
2552 thread = any_thread_of_process (inf->pid);
2553 if (thread)
2554 {
2555 switch_to_thread (thread->ptid);
2556
2557 /* Core inferiors actually should be detached, not killed. */
2558 if (target_has_execution)
2559 target_kill ();
2560 else
2561 target_detach (NULL, 0);
2562 }
2563
2564 return 0;
2565 }
2566
2567 /* This is to be called by the open routine before it does
2568 anything. */
2569
2570 void
2571 target_preopen (int from_tty)
2572 {
2573 dont_repeat ();
2574
2575 if (have_inferiors ())
2576 {
2577 if (!from_tty
2578 || !have_live_inferiors ()
2579 || query (_("A program is being debugged already. Kill it? ")))
2580 iterate_over_inferiors (dispose_inferior, NULL);
2581 else
2582 error (_("Program not killed."));
2583 }
2584
2585 /* Calling target_kill may remove the target from the stack. But if
2586 it doesn't (which seems like a win for UDI), remove it now. */
2587 /* Leave the exec target, though. The user may be switching from a
2588 live process to a core of the same program. */
2589 pop_all_targets_above (file_stratum);
2590
2591 target_pre_inferior (from_tty);
2592 }
2593
2594 /* Detach a target after doing deferred register stores. */
2595
2596 void
2597 target_detach (char *args, int from_tty)
2598 {
2599 struct target_ops* t;
2600
2601 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2602 /* Don't remove global breakpoints here. They're removed on
2603 disconnection from the target. */
2604 ;
2605 else
2606 /* If we're in breakpoints-always-inserted mode, we have to remove
2607 them before detaching. */
2608 remove_breakpoints_pid (PIDGET (inferior_ptid));
2609
2610 prepare_for_detach ();
2611
2612 for (t = current_target.beneath; t != NULL; t = t->beneath)
2613 {
2614 if (t->to_detach != NULL)
2615 {
2616 t->to_detach (t, args, from_tty);
2617 if (targetdebug)
2618 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2619 args, from_tty);
2620 return;
2621 }
2622 }
2623
2624 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2625 }
2626
2627 void
2628 target_disconnect (char *args, int from_tty)
2629 {
2630 struct target_ops *t;
2631
2632 /* If we're in breakpoints-always-inserted mode or if breakpoints
2633 are global across processes, we have to remove them before
2634 disconnecting. */
2635 remove_breakpoints ();
2636
2637 for (t = current_target.beneath; t != NULL; t = t->beneath)
2638 if (t->to_disconnect != NULL)
2639 {
2640 if (targetdebug)
2641 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2642 args, from_tty);
2643 t->to_disconnect (t, args, from_tty);
2644 return;
2645 }
2646
2647 tcomplain ();
2648 }
2649
2650 ptid_t
2651 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2652 {
2653 struct target_ops *t;
2654
2655 for (t = current_target.beneath; t != NULL; t = t->beneath)
2656 {
2657 if (t->to_wait != NULL)
2658 {
2659 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2660
2661 if (targetdebug)
2662 {
2663 char *status_string;
2664 char *options_string;
2665
2666 status_string = target_waitstatus_to_string (status);
2667 options_string = target_options_to_string (options);
2668 fprintf_unfiltered (gdb_stdlog,
2669 "target_wait (%d, status, options={%s})"
2670 " = %d, %s\n",
2671 PIDGET (ptid), options_string,
2672 PIDGET (retval), status_string);
2673 xfree (status_string);
2674 xfree (options_string);
2675 }
2676
2677 return retval;
2678 }
2679 }
2680
2681 noprocess ();
2682 }
2683
2684 char *
2685 target_pid_to_str (ptid_t ptid)
2686 {
2687 struct target_ops *t;
2688
2689 for (t = current_target.beneath; t != NULL; t = t->beneath)
2690 {
2691 if (t->to_pid_to_str != NULL)
2692 return (*t->to_pid_to_str) (t, ptid);
2693 }
2694
2695 return normal_pid_to_str (ptid);
2696 }
2697
2698 char *
2699 target_thread_name (struct thread_info *info)
2700 {
2701 struct target_ops *t;
2702
2703 for (t = current_target.beneath; t != NULL; t = t->beneath)
2704 {
2705 if (t->to_thread_name != NULL)
2706 return (*t->to_thread_name) (info);
2707 }
2708
2709 return NULL;
2710 }
2711
2712 void
2713 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2714 {
2715 struct target_ops *t;
2716
2717 target_dcache_invalidate ();
2718
2719 for (t = current_target.beneath; t != NULL; t = t->beneath)
2720 {
2721 if (t->to_resume != NULL)
2722 {
2723 t->to_resume (t, ptid, step, signal);
2724 if (targetdebug)
2725 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2726 PIDGET (ptid),
2727 step ? "step" : "continue",
2728 gdb_signal_to_name (signal));
2729
2730 registers_changed_ptid (ptid);
2731 set_executing (ptid, 1);
2732 set_running (ptid, 1);
2733 clear_inline_frame_state (ptid);
2734 return;
2735 }
2736 }
2737
2738 noprocess ();
2739 }
2740
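/* Illustrative sketch (editorial addition, not part of GDB): the
   resume/wait pairing that infrun performs.  Real code goes through the
   event loop and handles the returned waitstatus; this only shows the
   relationship between the two calls.  */
static void
example_step_once (ptid_t ptid)
{
  struct target_waitstatus ws;

  target_resume (ptid, 1 /* step */, GDB_SIGNAL_0);
  target_wait (ptid, &ws, 0);
}
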
2741 void
2742 target_pass_signals (int numsigs, unsigned char *pass_signals)
2743 {
2744 struct target_ops *t;
2745
2746 for (t = current_target.beneath; t != NULL; t = t->beneath)
2747 {
2748 if (t->to_pass_signals != NULL)
2749 {
2750 if (targetdebug)
2751 {
2752 int i;
2753
2754 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2755 numsigs);
2756
2757 for (i = 0; i < numsigs; i++)
2758 if (pass_signals[i])
2759 fprintf_unfiltered (gdb_stdlog, " %s",
2760 gdb_signal_to_name (i));
2761
2762 fprintf_unfiltered (gdb_stdlog, " })\n");
2763 }
2764
2765 (*t->to_pass_signals) (numsigs, pass_signals);
2766 return;
2767 }
2768 }
2769 }
2770
2771 void
2772 target_program_signals (int numsigs, unsigned char *program_signals)
2773 {
2774 struct target_ops *t;
2775
2776 for (t = current_target.beneath; t != NULL; t = t->beneath)
2777 {
2778 if (t->to_program_signals != NULL)
2779 {
2780 if (targetdebug)
2781 {
2782 int i;
2783
2784 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2785 numsigs);
2786
2787 for (i = 0; i < numsigs; i++)
2788 if (program_signals[i])
2789 fprintf_unfiltered (gdb_stdlog, " %s",
2790 gdb_signal_to_name (i));
2791
2792 fprintf_unfiltered (gdb_stdlog, " })\n");
2793 }
2794
2795 (*t->to_program_signals) (numsigs, program_signals);
2796 return;
2797 }
2798 }
2799 }
2800
2801 /* Look through the list of possible targets for a target that can
2802 follow forks. */
2803
2804 int
2805 target_follow_fork (int follow_child)
2806 {
2807 struct target_ops *t;
2808
2809 for (t = current_target.beneath; t != NULL; t = t->beneath)
2810 {
2811 if (t->to_follow_fork != NULL)
2812 {
2813 int retval = t->to_follow_fork (t, follow_child);
2814
2815 if (targetdebug)
2816 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2817 follow_child, retval);
2818 return retval;
2819 }
2820 }
2821
2822 /* Some target returned a fork event, but did not know how to follow it. */
2823 internal_error (__FILE__, __LINE__,
2824 _("could not find a target to follow fork"));
2825 }
2826
2827 void
2828 target_mourn_inferior (void)
2829 {
2830 struct target_ops *t;
2831
2832 for (t = current_target.beneath; t != NULL; t = t->beneath)
2833 {
2834 if (t->to_mourn_inferior != NULL)
2835 {
2836 t->to_mourn_inferior (t);
2837 if (targetdebug)
2838 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2839
2840 /* We no longer need to keep handles on any of the object files.
2841 Make sure to release them to avoid unnecessarily locking any
2842 of them while we're not actually debugging. */
2843 bfd_cache_close_all ();
2844
2845 return;
2846 }
2847 }
2848
2849 internal_error (__FILE__, __LINE__,
2850 _("could not find a target to follow mourn inferior"));
2851 }
2852
2853 /* Look for a target which can describe architectural features, starting
2854 from TARGET. If we find one, return its description. */
2855
2856 const struct target_desc *
2857 target_read_description (struct target_ops *target)
2858 {
2859 struct target_ops *t;
2860
2861 for (t = target; t != NULL; t = t->beneath)
2862 if (t->to_read_description != NULL)
2863 {
2864 const struct target_desc *tdesc;
2865
2866 tdesc = t->to_read_description (t);
2867 if (tdesc)
2868 return tdesc;
2869 }
2870
2871 return NULL;
2872 }
2873
2874 /* The default implementation of to_search_memory.
2875 This implements a basic search of memory, reading target memory and
2876 performing the search here (as opposed to performing the search on the
2877 target side with, for example, gdbserver). */
2878
2879 int
2880 simple_search_memory (struct target_ops *ops,
2881 CORE_ADDR start_addr, ULONGEST search_space_len,
2882 const gdb_byte *pattern, ULONGEST pattern_len,
2883 CORE_ADDR *found_addrp)
2884 {
2885 /* NOTE: also defined in find.c testcase. */
2886 #define SEARCH_CHUNK_SIZE 16000
2887 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2888 /* Buffer to hold memory contents for searching. */
2889 gdb_byte *search_buf;
2890 unsigned search_buf_size;
2891 struct cleanup *old_cleanups;
2892
2893 search_buf_size = chunk_size + pattern_len - 1;
2894
2895 /* No point in trying to allocate a buffer larger than the search space. */
2896 if (search_space_len < search_buf_size)
2897 search_buf_size = search_space_len;
2898
2899 search_buf = malloc (search_buf_size);
2900 if (search_buf == NULL)
2901 error (_("Unable to allocate memory to perform the search."));
2902 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2903
2904 /* Prime the search buffer. */
2905
2906 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2907 search_buf, start_addr, search_buf_size) != search_buf_size)
2908 {
2909 warning (_("Unable to access %s bytes of target "
2910 "memory at %s, halting search."),
2911 pulongest (search_buf_size), hex_string (start_addr));
2912 do_cleanups (old_cleanups);
2913 return -1;
2914 }
2915
2916 /* Perform the search.
2917
2918 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2919 When we've scanned N bytes we copy the trailing bytes to the start and
2920 read in another N bytes. */
2921
2922 while (search_space_len >= pattern_len)
2923 {
2924 gdb_byte *found_ptr;
2925 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2926
2927 found_ptr = memmem (search_buf, nr_search_bytes,
2928 pattern, pattern_len);
2929
2930 if (found_ptr != NULL)
2931 {
2932 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2933
2934 *found_addrp = found_addr;
2935 do_cleanups (old_cleanups);
2936 return 1;
2937 }
2938
2939 /* Not found in this chunk, skip to next chunk. */
2940
2941 /* Don't let search_space_len wrap here, it's unsigned. */
2942 if (search_space_len >= chunk_size)
2943 search_space_len -= chunk_size;
2944 else
2945 search_space_len = 0;
2946
2947 if (search_space_len >= pattern_len)
2948 {
2949 unsigned keep_len = search_buf_size - chunk_size;
2950 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2951 int nr_to_read;
2952
2953 /* Copy the trailing part of the previous iteration to the front
2954 of the buffer for the next iteration. */
2955 gdb_assert (keep_len == pattern_len - 1);
2956 memcpy (search_buf, search_buf + chunk_size, keep_len);
2957
2958 nr_to_read = min (search_space_len - keep_len, chunk_size);
2959
2960 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2961 search_buf + keep_len, read_addr,
2962 nr_to_read) != nr_to_read)
2963 {
2964 warning (_("Unable to access %s bytes of target "
2965 "memory at %s, halting search."),
2966 plongest (nr_to_read),
2967 hex_string (read_addr));
2968 do_cleanups (old_cleanups);
2969 return -1;
2970 }
2971
2972 start_addr += chunk_size;
2973 }
2974 }
2975
2976 /* Not found. */
2977
2978 do_cleanups (old_cleanups);
2979 return 0;
2980 }
2981
2982 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2983 sequence of bytes in PATTERN with length PATTERN_LEN.
2984
2985 The result is 1 if found, 0 if not found, and -1 if there was an error
2986 requiring halting of the search (e.g. memory read error).
2987 If the pattern is found the address is recorded in FOUND_ADDRP. */
2988
2989 int
2990 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2991 const gdb_byte *pattern, ULONGEST pattern_len,
2992 CORE_ADDR *found_addrp)
2993 {
2994 struct target_ops *t;
2995 int found;
2996
2997 /* We don't use INHERIT to set current_target.to_search_memory,
2998 so we have to scan the target stack and handle targetdebug
2999 ourselves. */
3000
3001 if (targetdebug)
3002 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3003 hex_string (start_addr));
3004
3005 for (t = current_target.beneath; t != NULL; t = t->beneath)
3006 if (t->to_search_memory != NULL)
3007 break;
3008
3009 if (t != NULL)
3010 {
3011 found = t->to_search_memory (t, start_addr, search_space_len,
3012 pattern, pattern_len, found_addrp);
3013 }
3014 else
3015 {
3016 /* If a special version of to_search_memory isn't available, use the
3017 simple version. */
3018 found = simple_search_memory (current_target.beneath,
3019 start_addr, search_space_len,
3020 pattern, pattern_len, found_addrp);
3021 }
3022
3023 if (targetdebug)
3024 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3025
3026 return found;
3027 }
3028
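/* Illustrative sketch (editorial addition, not part of GDB): searching
   the first 4K of memory at START for a two-byte pattern.  GDB's "find"
   command is the main caller of this interface; the pattern bytes below
   are arbitrary.  */
static void
example_search (CORE_ADDR start)
{
  static const gdb_byte pattern[] = { 0x55, 0x48 };
  CORE_ADDR found;

  if (target_search_memory (start, 4096, pattern, sizeof pattern,
			    &found) == 1)
    printf_unfiltered ("pattern found at %s\n", hex_string (found));
}
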
3029 /* Look through the currently pushed targets. If none of them will
3030 be able to restart the currently running process, issue an error
3031 message. */
3032
3033 void
3034 target_require_runnable (void)
3035 {
3036 struct target_ops *t;
3037
3038 for (t = target_stack; t != NULL; t = t->beneath)
3039 {
3040 /* If this target knows how to create a new program, then
3041 assume we will still be able to after killing the current
3042 one. Either killing and mourning will not pop T, or else
3043 find_default_run_target will find it again. */
3044 if (t->to_create_inferior != NULL)
3045 return;
3046
3047 /* Do not worry about thread_stratum targets that cannot
3048 create inferiors. Assume they will be pushed again if
3049 necessary, and continue to the process_stratum. */
3050 if (t->to_stratum == thread_stratum
3051 || t->to_stratum == arch_stratum)
3052 continue;
3053
3054 error (_("The \"%s\" target does not support \"run\". "
3055 "Try \"help target\" or \"continue\"."),
3056 t->to_shortname);
3057 }
3058
3059 /* This function is only called if the target is running. In that
3060 case there should have been a process_stratum target and it
3061 should either know how to create inferiors, or not... */
3062 internal_error (__FILE__, __LINE__, _("No targets found"));
3063 }
3064
3065 /* Look through the list of possible targets for a target that can
3066 execute a run or attach command without any other data. This is
3067 used to locate the default process stratum.
3068
3069 If DO_MESG is not NULL, the result is always valid (error() is
3070 called for errors); else, return NULL on error. */
3071
3072 static struct target_ops *
3073 find_default_run_target (char *do_mesg)
3074 {
3075 struct target_ops **t;
3076 struct target_ops *runable = NULL;
3077 int count;
3078
3079 count = 0;
3080
3081 for (t = target_structs; t < target_structs + target_struct_size;
3082 ++t)
3083 {
3084 if ((*t)->to_can_run && target_can_run (*t))
3085 {
3086 runable = *t;
3087 ++count;
3088 }
3089 }
3090
3091 if (count != 1)
3092 {
3093 if (do_mesg)
3094 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3095 else
3096 return NULL;
3097 }
3098
3099 return runable;
3100 }
3101
3102 void
3103 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3104 {
3105 struct target_ops *t;
3106
3107 t = find_default_run_target ("attach");
3108 (t->to_attach) (t, args, from_tty);
3109 return;
3110 }
3111
3112 void
3113 find_default_create_inferior (struct target_ops *ops,
3114 char *exec_file, char *allargs, char **env,
3115 int from_tty)
3116 {
3117 struct target_ops *t;
3118
3119 t = find_default_run_target ("run");
3120 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3121 return;
3122 }
3123
3124 static int
3125 find_default_can_async_p (void)
3126 {
3127 struct target_ops *t;
3128
3129 /* This may be called before the target is pushed on the stack;
3130 look for the default process stratum. If there's none, gdb isn't
3131 configured with a native debugger, and target remote isn't
3132 connected yet. */
3133 t = find_default_run_target (NULL);
3134 if (t && t->to_can_async_p)
3135 return (t->to_can_async_p) ();
3136 return 0;
3137 }
3138
3139 static int
3140 find_default_is_async_p (void)
3141 {
3142 struct target_ops *t;
3143
3144 /* This may be called before the target is pushed on the stack;
3145 look for the default process stratum. If there's none, gdb isn't
3146 configured with a native debugger, and target remote isn't
3147 connected yet. */
3148 t = find_default_run_target (NULL);
3149 if (t && t->to_is_async_p)
3150 return (t->to_is_async_p) ();
3151 return 0;
3152 }
3153
3154 static int
3155 find_default_supports_non_stop (void)
3156 {
3157 struct target_ops *t;
3158
3159 t = find_default_run_target (NULL);
3160 if (t && t->to_supports_non_stop)
3161 return (t->to_supports_non_stop) ();
3162 return 0;
3163 }
3164
3165 int
3166 target_supports_non_stop (void)
3167 {
3168 struct target_ops *t;
3169
3170 for (t = &current_target; t != NULL; t = t->beneath)
3171 if (t->to_supports_non_stop)
3172 return t->to_supports_non_stop ();
3173
3174 return 0;
3175 }
3176
3177 /* Implement the "info proc" command. */
3178
3179 int
3180 target_info_proc (char *args, enum info_proc_what what)
3181 {
3182 struct target_ops *t;
3183
3184 /* If we're already connected to something that can get us OS
3185 related data, use it. Otherwise, try using the native
3186 target. */
3187 if (current_target.to_stratum >= process_stratum)
3188 t = current_target.beneath;
3189 else
3190 t = find_default_run_target (NULL);
3191
3192 for (; t != NULL; t = t->beneath)
3193 {
3194 if (t->to_info_proc != NULL)
3195 {
3196 t->to_info_proc (t, args, what);
3197
3198 if (targetdebug)
3199 fprintf_unfiltered (gdb_stdlog,
3200 "target_info_proc (\"%s\", %d)\n", args, what);
3201
3202 return 1;
3203 }
3204 }
3205
3206 return 0;
3207 }
3208
3209 static int
3210 find_default_supports_disable_randomization (void)
3211 {
3212 struct target_ops *t;
3213
3214 t = find_default_run_target (NULL);
3215 if (t && t->to_supports_disable_randomization)
3216 return (t->to_supports_disable_randomization) ();
3217 return 0;
3218 }
3219
3220 int
3221 target_supports_disable_randomization (void)
3222 {
3223 struct target_ops *t;
3224
3225 for (t = &current_target; t != NULL; t = t->beneath)
3226 if (t->to_supports_disable_randomization)
3227 return t->to_supports_disable_randomization ();
3228
3229 return 0;
3230 }
3231
3232 char *
3233 target_get_osdata (const char *type)
3234 {
3235 struct target_ops *t;
3236
3237 /* If we're already connected to something that can get us OS
3238 related data, use it. Otherwise, try using the native
3239 target. */
3240 if (current_target.to_stratum >= process_stratum)
3241 t = current_target.beneath;
3242 else
3243 t = find_default_run_target ("get OS data");
3244
3245 if (!t)
3246 return NULL;
3247
3248 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3249 }
3250
3251 /* Determine the current address space of thread PTID. */
3252
3253 struct address_space *
3254 target_thread_address_space (ptid_t ptid)
3255 {
3256 struct address_space *aspace;
3257 struct inferior *inf;
3258 struct target_ops *t;
3259
3260 for (t = current_target.beneath; t != NULL; t = t->beneath)
3261 {
3262 if (t->to_thread_address_space != NULL)
3263 {
3264 aspace = t->to_thread_address_space (t, ptid);
3265 gdb_assert (aspace);
3266
3267 if (targetdebug)
3268 fprintf_unfiltered (gdb_stdlog,
3269 "target_thread_address_space (%s) = %d\n",
3270 target_pid_to_str (ptid),
3271 address_space_num (aspace));
3272 return aspace;
3273 }
3274 }
3275
3276 /* Fall-back to the "main" address space of the inferior. */
3277 inf = find_inferior_pid (ptid_get_pid (ptid));
3278
3279 if (inf == NULL || inf->aspace == NULL)
3280 internal_error (__FILE__, __LINE__,
3281 _("Can't determine the current "
3282 "address space of thread %s\n"),
3283 target_pid_to_str (ptid));
3284
3285 return inf->aspace;
3286 }
3287
3288
3289 /* Target file operations. */
3290
3291 static struct target_ops *
3292 default_fileio_target (void)
3293 {
3294 /* If we're already connected to something that can perform
3295 file I/O, use it. Otherwise, try using the native target. */
3296 if (current_target.to_stratum >= process_stratum)
3297 return current_target.beneath;
3298 else
3299 return find_default_run_target ("file I/O");
3300 }
3301
3302 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3303 target file descriptor, or -1 if an error occurs (and set
3304 *TARGET_ERRNO). */
3305 int
3306 target_fileio_open (const char *filename, int flags, int mode,
3307 int *target_errno)
3308 {
3309 struct target_ops *t;
3310
3311 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3312 {
3313 if (t->to_fileio_open != NULL)
3314 {
3315 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3316
3317 if (targetdebug)
3318 fprintf_unfiltered (gdb_stdlog,
3319 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3320 filename, flags, mode,
3321 fd, fd != -1 ? 0 : *target_errno);
3322 return fd;
3323 }
3324 }
3325
3326 *target_errno = FILEIO_ENOSYS;
3327 return -1;
3328 }
3329
3330 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3331 Return the number of bytes written, or -1 if an error occurs
3332 (and set *TARGET_ERRNO). */
3333 int
3334 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3335 ULONGEST offset, int *target_errno)
3336 {
3337 struct target_ops *t;
3338
3339 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3340 {
3341 if (t->to_fileio_pwrite != NULL)
3342 {
3343 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3344 target_errno);
3345
3346 if (targetdebug)
3347 fprintf_unfiltered (gdb_stdlog,
3348 "target_fileio_pwrite (%d,...,%d,%s) "
3349 "= %d (%d)\n",
3350 fd, len, pulongest (offset),
3351 ret, ret != -1 ? 0 : *target_errno);
3352 return ret;
3353 }
3354 }
3355
3356 *target_errno = FILEIO_ENOSYS;
3357 return -1;
3358 }
3359
3360 /* Read up to LEN bytes from FD on the target into READ_BUF.
3361 Return the number of bytes read, or -1 if an error occurs
3362 (and set *TARGET_ERRNO). */
3363 int
3364 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3365 ULONGEST offset, int *target_errno)
3366 {
3367 struct target_ops *t;
3368
3369 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3370 {
3371 if (t->to_fileio_pread != NULL)
3372 {
3373 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3374 target_errno);
3375
3376 if (targetdebug)
3377 fprintf_unfiltered (gdb_stdlog,
3378 "target_fileio_pread (%d,...,%d,%s) "
3379 "= %d (%d)\n",
3380 fd, len, pulongest (offset),
3381 ret, ret != -1 ? 0 : *target_errno);
3382 return ret;
3383 }
3384 }
3385
3386 *target_errno = FILEIO_ENOSYS;
3387 return -1;
3388 }
3389
3390 /* Close FD on the target. Return 0, or -1 if an error occurs
3391 (and set *TARGET_ERRNO). */
3392 int
3393 target_fileio_close (int fd, int *target_errno)
3394 {
3395 struct target_ops *t;
3396
3397 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3398 {
3399 if (t->to_fileio_close != NULL)
3400 {
3401 int ret = t->to_fileio_close (fd, target_errno);
3402
3403 if (targetdebug)
3404 fprintf_unfiltered (gdb_stdlog,
3405 "target_fileio_close (%d) = %d (%d)\n",
3406 fd, ret, ret != -1 ? 0 : *target_errno);
3407 return ret;
3408 }
3409 }
3410
3411 *target_errno = FILEIO_ENOSYS;
3412 return -1;
3413 }
3414
3415 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3416 occurs (and set *TARGET_ERRNO). */
3417 int
3418 target_fileio_unlink (const char *filename, int *target_errno)
3419 {
3420 struct target_ops *t;
3421
3422 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3423 {
3424 if (t->to_fileio_unlink != NULL)
3425 {
3426 int ret = t->to_fileio_unlink (filename, target_errno);
3427
3428 if (targetdebug)
3429 fprintf_unfiltered (gdb_stdlog,
3430 "target_fileio_unlink (%s) = %d (%d)\n",
3431 filename, ret, ret != -1 ? 0 : *target_errno);
3432 return ret;
3433 }
3434 }
3435
3436 *target_errno = FILEIO_ENOSYS;
3437 return -1;
3438 }
3439
3440 /* Read value of symbolic link FILENAME on the target. Return a
3441 null-terminated string allocated via xmalloc, or NULL if an error
3442 occurs (and set *TARGET_ERRNO). */
3443 char *
3444 target_fileio_readlink (const char *filename, int *target_errno)
3445 {
3446 struct target_ops *t;
3447
3448 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3449 {
3450 if (t->to_fileio_readlink != NULL)
3451 {
3452 char *ret = t->to_fileio_readlink (filename, target_errno);
3453
3454 if (targetdebug)
3455 fprintf_unfiltered (gdb_stdlog,
3456 "target_fileio_readlink (%s) = %s (%d)\n",
3457 filename, ret? ret : "(nil)",
3458 ret? 0 : *target_errno);
3459 return ret;
3460 }
3461 }
3462
3463 *target_errno = FILEIO_ENOSYS;
3464 return NULL;
3465 }
3466
3467 static void
3468 target_fileio_close_cleanup (void *opaque)
3469 {
3470 int fd = *(int *) opaque;
3471 int target_errno;
3472
3473 target_fileio_close (fd, &target_errno);
3474 }
3475
3476 /* Read target file FILENAME. Store the result in *BUF_P and
3477 return the size of the transferred data. PADDING additional bytes are
3478 available in *BUF_P. This is a helper function for
3479 target_fileio_read_alloc; see the declaration of that function for more
3480 information. */
3481
3482 static LONGEST
3483 target_fileio_read_alloc_1 (const char *filename,
3484 gdb_byte **buf_p, int padding)
3485 {
3486 struct cleanup *close_cleanup;
3487 size_t buf_alloc, buf_pos;
3488 gdb_byte *buf;
3489 LONGEST n;
3490 int fd;
3491 int target_errno;
3492
3493 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3494 if (fd == -1)
3495 return -1;
3496
3497 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3498
3499 /* Start by reading up to 4K at a time. The target will throttle
3500 this number down if necessary. */
3501 buf_alloc = 4096;
3502 buf = xmalloc (buf_alloc);
3503 buf_pos = 0;
3504 while (1)
3505 {
3506 n = target_fileio_pread (fd, &buf[buf_pos],
3507 buf_alloc - buf_pos - padding, buf_pos,
3508 &target_errno);
3509 if (n < 0)
3510 {
3511 /* An error occurred. */
3512 do_cleanups (close_cleanup);
3513 xfree (buf);
3514 return -1;
3515 }
3516 else if (n == 0)
3517 {
3518 /* Read all there was. */
3519 do_cleanups (close_cleanup);
3520 if (buf_pos == 0)
3521 xfree (buf);
3522 else
3523 *buf_p = buf;
3524 return buf_pos;
3525 }
3526
3527 buf_pos += n;
3528
3529 /* If the buffer is filling up, expand it. */
3530 if (buf_alloc < buf_pos * 2)
3531 {
3532 buf_alloc *= 2;
3533 buf = xrealloc (buf, buf_alloc);
3534 }
3535
3536 QUIT;
3537 }
3538 }
3539
3540 /* Read target file FILENAME. Store the result in *BUF_P and return
3541 the size of the transferred data. See the declaration in "target.h"
3542 for more information about the return value. */
3543
3544 LONGEST
3545 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3546 {
3547 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3548 }
3549
3550 /* Read target file FILENAME. The result is NUL-terminated and
3551 returned as a string, allocated using xmalloc. If an error occurs
3552 or the transfer is unsupported, NULL is returned. Empty objects
3553 are returned as allocated but empty strings. A warning is issued
3554 if the result contains any embedded NUL bytes. */
3555
3556 char *
3557 target_fileio_read_stralloc (const char *filename)
3558 {
3559 gdb_byte *buffer;
3560 char *bufstr;
3561 LONGEST i, transferred;
3562
3563 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3564 bufstr = (char *) buffer;
3565
3566 if (transferred < 0)
3567 return NULL;
3568
3569 if (transferred == 0)
3570 return xstrdup ("");
3571
3572 bufstr[transferred] = 0;
3573
3574 /* Check for embedded NUL bytes; but allow trailing NULs. */
3575 for (i = strlen (bufstr); i < transferred; i++)
3576 if (bufstr[i] != 0)
3577 {
3578 warning (_("target file %s "
3579 "contained unexpected null characters"),
3580 filename);
3581 break;
3582 }
3583
3584 return bufstr;
3585 }
3586
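/* Illustrative sketch (editorial addition, not part of GDB): the
   open/pread/close sequence on the target's filesystem, checking the
   returned target errno.  The path is hypothetical.  */
static void
example_fileio_read (void)
{
  int target_errno, fd;
  gdb_byte buf[128];

  fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY, 0,
			   &target_errno);
  if (fd == -1)
    {
      warning (_("open failed, target errno %d"), target_errno);
      return;
    }

  if (target_fileio_pread (fd, buf, sizeof buf, 0, &target_errno) < 0)
    warning (_("pread failed, target errno %d"), target_errno);

  target_fileio_close (fd, &target_errno);
}
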
3587
3588 static int
3589 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3590 {
3591 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3592 }
3593
3594 static int
3595 default_watchpoint_addr_within_range (struct target_ops *target,
3596 CORE_ADDR addr,
3597 CORE_ADDR start, int length)
3598 {
3599 return addr >= start && addr < start + length;
3600 }
3601
3602 static struct gdbarch *
3603 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3604 {
3605 return target_gdbarch ();
3606 }
3607
3608 static int
3609 return_zero (void)
3610 {
3611 return 0;
3612 }
3613
3614 static int
3615 return_one (void)
3616 {
3617 return 1;
3618 }
3619
3620 static int
3621 return_minus_one (void)
3622 {
3623 return -1;
3624 }
3625
3626 /* Find the next target down the stack from the specified target. */
3629
3630 struct target_ops *
3631 find_target_beneath (struct target_ops *t)
3632 {
3633 return t->beneath;
3634 }
3635
3636 \f
3637 /* The inferior process has died. Long live the inferior! */
3638
3639 void
3640 generic_mourn_inferior (void)
3641 {
3642 ptid_t ptid;
3643
3644 ptid = inferior_ptid;
3645 inferior_ptid = null_ptid;
3646
3647 /* Mark breakpoints uninserted in case something tries to delete a
3648 breakpoint while we delete the inferior's threads (which would
3649 fail, since the inferior is long gone). */
3650 mark_breakpoints_out ();
3651
3652 if (!ptid_equal (ptid, null_ptid))
3653 {
3654 int pid = ptid_get_pid (ptid);
3655 exit_inferior (pid);
3656 }
3657
3658 /* Note this wipes step-resume breakpoints, so needs to be done
3659 after exit_inferior, which ends up referencing the step-resume
3660 breakpoints through clear_thread_inferior_resources. */
3661 breakpoint_init_inferior (inf_exited);
3662
3663 registers_changed ();
3664
3665 reopen_exec_file ();
3666 reinit_frame_cache ();
3667
3668 if (deprecated_detach_hook)
3669 deprecated_detach_hook ();
3670 }
3671 \f
3672 /* Convert a normal process ID to a string. Returns the string in a
3673 static buffer. */
3674
3675 char *
3676 normal_pid_to_str (ptid_t ptid)
3677 {
3678 static char buf[32];
3679
3680 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3681 return buf;
3682 }
3683
3684 static char *
3685 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3686 {
3687 return normal_pid_to_str (ptid);
3688 }
3689
3690 /* Error-catcher for target_find_memory_regions. */
3691 static int
3692 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3693 {
3694 error (_("Command not implemented for this target."));
3695 return 0;
3696 }
3697
3698 /* Error-catcher for target_make_corefile_notes. */
3699 static char *
3700 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3701 {
3702 error (_("Command not implemented for this target."));
3703 return NULL;
3704 }
3705
3706 /* Error-catcher for target_get_bookmark. */
3707 static gdb_byte *
3708 dummy_get_bookmark (char *ignore1, int ignore2)
3709 {
3710 tcomplain ();
3711 return NULL;
3712 }
3713
3714 /* Error-catcher for target_goto_bookmark. */
3715 static void
3716 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3717 {
3718 tcomplain ();
3719 }
3720
3721 /* Set up the handful of non-empty slots needed by the dummy target
3722 vector. */
3723
3724 static void
3725 init_dummy_target (void)
3726 {
3727 dummy_target.to_shortname = "None";
3728 dummy_target.to_longname = "None";
3729 dummy_target.to_doc = "";
3730 dummy_target.to_attach = find_default_attach;
3731 dummy_target.to_detach =
3732 (void (*)(struct target_ops *, char *, int))target_ignore;
3733 dummy_target.to_create_inferior = find_default_create_inferior;
3734 dummy_target.to_can_async_p = find_default_can_async_p;
3735 dummy_target.to_is_async_p = find_default_is_async_p;
3736 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3737 dummy_target.to_supports_disable_randomization
3738 = find_default_supports_disable_randomization;
3739 dummy_target.to_pid_to_str = dummy_pid_to_str;
3740 dummy_target.to_stratum = dummy_stratum;
3741 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3742 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3743 dummy_target.to_get_bookmark = dummy_get_bookmark;
3744 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3745 dummy_target.to_xfer_partial = default_xfer_partial;
3746 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3747 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3748 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3749 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3750 dummy_target.to_has_execution
3751 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3752 dummy_target.to_stopped_by_watchpoint = return_zero;
3753 dummy_target.to_stopped_data_address =
3754 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3755 dummy_target.to_magic = OPS_MAGIC;
3756 }
3757 \f
3758 static void
3759 debug_to_open (char *args, int from_tty)
3760 {
3761 debug_target.to_open (args, from_tty);
3762
3763 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3764 }
3765
3766 void
3767 target_close (struct target_ops *targ)
3768 {
3769 gdb_assert (!target_is_pushed (targ));
3770
3771 if (targ->to_xclose != NULL)
3772 targ->to_xclose (targ);
3773 else if (targ->to_close != NULL)
3774 targ->to_close ();
3775
3776 if (targetdebug)
3777 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3778 }
3779
3780 void
3781 target_attach (char *args, int from_tty)
3782 {
3783 struct target_ops *t;
3784
3785 for (t = current_target.beneath; t != NULL; t = t->beneath)
3786 {
3787 if (t->to_attach != NULL)
3788 {
3789 t->to_attach (t, args, from_tty);
3790 if (targetdebug)
3791 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3792 args, from_tty);
3793 return;
3794 }
3795 }
3796
3797 internal_error (__FILE__, __LINE__,
3798 _("could not find a target to attach"));
3799 }
3800
3801 int
3802 target_thread_alive (ptid_t ptid)
3803 {
3804 struct target_ops *t;
3805
3806 for (t = current_target.beneath; t != NULL; t = t->beneath)
3807 {
3808 if (t->to_thread_alive != NULL)
3809 {
3810 int retval;
3811
3812 retval = t->to_thread_alive (t, ptid);
3813 if (targetdebug)
3814 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3815 PIDGET (ptid), retval);
3816
3817 return retval;
3818 }
3819 }
3820
3821 return 0;
3822 }
3823
3824 void
3825 target_find_new_threads (void)
3826 {
3827 struct target_ops *t;
3828
3829 for (t = current_target.beneath; t != NULL; t = t->beneath)
3830 {
3831 if (t->to_find_new_threads != NULL)
3832 {
3833 t->to_find_new_threads (t);
3834 if (targetdebug)
3835 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3836
3837 return;
3838 }
3839 }
3840 }
3841
3842 void
3843 target_stop (ptid_t ptid)
3844 {
3845 if (!may_stop)
3846 {
3847 warning (_("May not interrupt or stop the target, ignoring attempt"));
3848 return;
3849 }
3850
3851 (*current_target.to_stop) (ptid);
3852 }
3853
3854 static void
3855 debug_to_post_attach (int pid)
3856 {
3857 debug_target.to_post_attach (pid);
3858
3859 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3860 }
3861
3862 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3863 result. The incoming LIST argument is released. */
3864
3865 static char *
3866 str_comma_list_concat_elem (char *list, const char *elem)
3867 {
3868 if (list == NULL)
3869 return xstrdup (elem);
3870 else
3871 return reconcat (list, list, ", ", elem, (char *) NULL);
3872 }
3873
3874 /* Helper for target_options_to_string. If OPT is present in
3875 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3876 Returns the new resulting string. OPT is removed from
3877 TARGET_OPTIONS. */
3878
3879 static char *
3880 do_option (int *target_options, char *ret,
3881 int opt, char *opt_str)
3882 {
3883 if ((*target_options & opt) != 0)
3884 {
3885 ret = str_comma_list_concat_elem (ret, opt_str);
3886 *target_options &= ~opt;
3887 }
3888
3889 return ret;
3890 }
3891
3892 char *
3893 target_options_to_string (int target_options)
3894 {
3895 char *ret = NULL;
3896
3897 #define DO_TARG_OPTION(OPT) \
3898 ret = do_option (&target_options, ret, OPT, #OPT)
3899
3900 DO_TARG_OPTION (TARGET_WNOHANG);
3901
3902 if (target_options != 0)
3903 ret = str_comma_list_concat_elem (ret, "unknown???");
3904
3905 if (ret == NULL)
3906 ret = xstrdup ("");
3907 return ret;
3908 }
3909
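/* Illustrative sketch (editorial addition, not part of GDB): the string
   built by target_options_to_string is comma-separated and must be freed
   by the caller; passing TARGET_WNOHANG alone yields "TARGET_WNOHANG".  */
static void
example_show_options (int options)
{
  char *str = target_options_to_string (options);

  fprintf_unfiltered (gdb_stdlog, "options: {%s}\n", str);
  xfree (str);
}
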
3910 static void
3911 debug_print_register (const char * func,
3912 struct regcache *regcache, int regno)
3913 {
3914 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3915
3916 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3917 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3918 && gdbarch_register_name (gdbarch, regno) != NULL
3919 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3920 fprintf_unfiltered (gdb_stdlog, "(%s)",
3921 gdbarch_register_name (gdbarch, regno));
3922 else
3923 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3924 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3925 {
3926 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3927 int i, size = register_size (gdbarch, regno);
3928 gdb_byte buf[MAX_REGISTER_SIZE];
3929
3930 regcache_raw_collect (regcache, regno, buf);
3931 fprintf_unfiltered (gdb_stdlog, " = ");
3932 for (i = 0; i < size; i++)
3933 {
3934 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3935 }
3936 if (size <= sizeof (LONGEST))
3937 {
3938 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3939
3940 fprintf_unfiltered (gdb_stdlog, " %s %s",
3941 core_addr_to_string_nz (val), plongest (val));
3942 }
3943 }
3944 fprintf_unfiltered (gdb_stdlog, "\n");
3945 }
3946
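/* For illustration, the line debug_print_register produces for a
   4-byte program counter on a little-endian target might look like
   this (the values are made up):

     target_fetch_registers (pc) = 36114000 0x401136 4198710

   i.e. the raw bytes in target byte order, followed by the value
   printed as an address and as a decimal integer.  */
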
3947 void
3948 target_fetch_registers (struct regcache *regcache, int regno)
3949 {
3950 struct target_ops *t;
3951
3952 for (t = current_target.beneath; t != NULL; t = t->beneath)
3953 {
3954 if (t->to_fetch_registers != NULL)
3955 {
3956 t->to_fetch_registers (t, regcache, regno);
3957 if (targetdebug)
3958 debug_print_register ("target_fetch_registers", regcache, regno);
3959 return;
3960 }
3961 }
3962 }
3963
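/* In target_fetch_registers above and target_store_registers below, a
   REGNO of -1 conventionally means "all registers"; the individual
   target implementations are expected to honor that convention.  */
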
3964 void
3965 target_store_registers (struct regcache *regcache, int regno)
3966 {
3967 struct target_ops *t;
3968
3969 if (!may_write_registers)
3970 error (_("Writing to registers is not allowed (regno %d)"), regno);
3971
3972 for (t = current_target.beneath; t != NULL; t = t->beneath)
3973 {
3974 if (t->to_store_registers != NULL)
3975 {
3976 t->to_store_registers (t, regcache, regno);
3977 if (targetdebug)
3978 {
3979 debug_print_register ("target_store_registers", regcache, regno);
3980 }
3981 return;
3982 }
3983 }
3984
3985 noprocess ();
3986 }
3987
3988 int
3989 target_core_of_thread (ptid_t ptid)
3990 {
3991 struct target_ops *t;
3992
3993 for (t = current_target.beneath; t != NULL; t = t->beneath)
3994 {
3995 if (t->to_core_of_thread != NULL)
3996 {
3997 int retval = t->to_core_of_thread (t, ptid);
3998
3999 if (targetdebug)
4000 fprintf_unfiltered (gdb_stdlog,
4001 "target_core_of_thread (%d) = %d\n",
4002 PIDGET (ptid), retval);
4003 return retval;
4004 }
4005 }
4006
4007 return -1;
4008 }
4009
4010 int
4011 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4012 {
4013 struct target_ops *t;
4014
4015 for (t = current_target.beneath; t != NULL; t = t->beneath)
4016 {
4017 if (t->to_verify_memory != NULL)
4018 {
4019 int retval = t->to_verify_memory (t, data, memaddr, size);
4020
4021 if (targetdebug)
4022 fprintf_unfiltered (gdb_stdlog,
4023 "target_verify_memory (%s, %s) = %d\n",
4024 paddress (target_gdbarch (), memaddr),
4025 pulongest (size),
4026 retval);
4027 return retval;
4028 }
4029 }
4030
4031 tcomplain ();
4032 }
4033
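/* target_verify_memory is used, for instance, to implement the
   "compare-sections" command on targets that can checksum memory
   remotely (such as the remote protocol's qCRC packet); tcomplain
   raises an error when nothing on the stack supports it.  */
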
4034 /* The documentation for this function is in its prototype declaration in
4035 target.h. */
4036
4037 int
4038 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4039 {
4040 struct target_ops *t;
4041
4042 for (t = current_target.beneath; t != NULL; t = t->beneath)
4043 if (t->to_insert_mask_watchpoint != NULL)
4044 {
4045 int ret;
4046
4047 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4048
4049 if (targetdebug)
4050 fprintf_unfiltered (gdb_stdlog, "\
4051 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4052 core_addr_to_string (addr),
4053 core_addr_to_string (mask), rw, ret);
4054
4055 return ret;
4056 }
4057
4058 return 1;
4059 }
4060
4061 /* The documentation for this function is in its prototype declaration in
4062 target.h. */
4063
4064 int
4065 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4066 {
4067 struct target_ops *t;
4068
4069 for (t = current_target.beneath; t != NULL; t = t->beneath)
4070 if (t->to_remove_mask_watchpoint != NULL)
4071 {
4072 int ret;
4073
4074 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4075
4076 if (targetdebug)
4077 fprintf_unfiltered (gdb_stdlog, "\
4078 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4079 core_addr_to_string (addr),
4080 core_addr_to_string (mask), rw, ret);
4081
4082 return ret;
4083 }
4084
4085 return 1;
4086 }
4087
4088 /* The documentation for this function is in its prototype declaration
4089 in target.h. */
4090
4091 int
4092 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4093 {
4094 struct target_ops *t;
4095
4096 for (t = current_target.beneath; t != NULL; t = t->beneath)
4097 if (t->to_masked_watch_num_registers != NULL)
4098 return t->to_masked_watch_num_registers (t, addr, mask);
4099
4100 return -1;
4101 }
4102
4103 /* The documentation for this function is in its prototype declaration
4104 in target.h. */
4105
4106 int
4107 target_ranged_break_num_registers (void)
4108 {
4109 struct target_ops *t;
4110
4111 for (t = current_target.beneath; t != NULL; t = t->beneath)
4112 if (t->to_ranged_break_num_registers != NULL)
4113 return t->to_ranged_break_num_registers (t);
4114
4115 return -1;
4116 }
4117
4118 /* See target.h. */
4119
4120 int
4121 target_supports_btrace (void)
4122 {
4123 struct target_ops *t;
4124
4125 for (t = current_target.beneath; t != NULL; t = t->beneath)
4126 if (t->to_supports_btrace != NULL)
4127 return t->to_supports_btrace ();
4128
4129 return 0;
4130 }
4131
4132 /* See target.h. */
4133
4134 struct btrace_target_info *
4135 target_enable_btrace (ptid_t ptid)
4136 {
4137 struct target_ops *t;
4138
4139 for (t = current_target.beneath; t != NULL; t = t->beneath)
4140 if (t->to_enable_btrace != NULL)
4141 return t->to_enable_btrace (ptid);
4142
4143 tcomplain ();
4144 return NULL;
4145 }
4146
4147 /* See target.h. */
4148
4149 void
4150 target_disable_btrace (struct btrace_target_info *btinfo)
4151 {
4152 struct target_ops *t;
4153
4154 for (t = current_target.beneath; t != NULL; t = t->beneath)
4155 if (t->to_disable_btrace != NULL)
4156 return t->to_disable_btrace (btinfo);
4157
4158 tcomplain ();
4159 }
4160
4161 /* See target.h. */
4162
4163 void
4164 target_teardown_btrace (struct btrace_target_info *btinfo)
4165 {
4166 struct target_ops *t;
4167
4168 for (t = current_target.beneath; t != NULL; t = t->beneath)
4169 if (t->to_teardown_btrace != NULL)
4170 return t->to_teardown_btrace (btinfo);
4171
4172 tcomplain ();
4173 }
4174
4175 /* See target.h. */
4176
4177 VEC (btrace_block_s) *
4178 target_read_btrace (struct btrace_target_info *btinfo,
4179 enum btrace_read_type type)
4180 {
4181 struct target_ops *t;
4182
4183 for (t = current_target.beneath; t != NULL; t = t->beneath)
4184 if (t->to_read_btrace != NULL)
4185 return t->to_read_btrace (btinfo, type);
4186
4187 tcomplain ();
4188 return NULL;
4189 }
4190
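/* Illustrative sequence (not from this file) that a branch-trace
   consumer might follow, assuming the BTRACE_READ_NEW read type:

     struct btrace_target_info *tinfo;
     VEC (btrace_block_s) *blocks;

     tinfo = target_enable_btrace (inferior_ptid);
     blocks = target_read_btrace (tinfo, BTRACE_READ_NEW);
     ...
     target_disable_btrace (tinfo);

   target_teardown_btrace above is the variant used when the target
   itself is going away and errors should not be reported back.  */
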
4191 /* See target.h. */
4192
4193 void
4194 target_stop_recording (void)
4195 {
4196 struct target_ops *t;
4197
4198 for (t = current_target.beneath; t != NULL; t = t->beneath)
4199 if (t->to_stop_recording != NULL)
4200 {
4201 t->to_stop_recording ();
4202 return;
4203 }
4204
4205 /* This is optional. */
4206 }
4207
4208 /* See target.h. */
4209
4210 void
4211 target_info_record (void)
4212 {
4213 struct target_ops *t;
4214
4215 for (t = current_target.beneath; t != NULL; t = t->beneath)
4216 if (t->to_info_record != NULL)
4217 {
4218 t->to_info_record ();
4219 return;
4220 }
4221
4222 tcomplain ();
4223 }
4224
4225 /* See target.h. */
4226
4227 void
4228 target_save_record (const char *filename)
4229 {
4230 struct target_ops *t;
4231
4232 for (t = current_target.beneath; t != NULL; t = t->beneath)
4233 if (t->to_save_record != NULL)
4234 {
4235 t->to_save_record (filename);
4236 return;
4237 }
4238
4239 tcomplain ();
4240 }
4241
4242 /* See target.h. */
4243
4244 int
4245 target_supports_delete_record (void)
4246 {
4247 struct target_ops *t;
4248
4249 for (t = current_target.beneath; t != NULL; t = t->beneath)
4250 if (t->to_delete_record != NULL)
4251 return 1;
4252
4253 return 0;
4254 }
4255
4256 /* See target.h. */
4257
4258 void
4259 target_delete_record (void)
4260 {
4261 struct target_ops *t;
4262
4263 for (t = current_target.beneath; t != NULL; t = t->beneath)
4264 if (t->to_delete_record != NULL)
4265 {
4266 t->to_delete_record ();
4267 return;
4268 }
4269
4270 tcomplain ();
4271 }
4272
4273 /* See target.h. */
4274
4275 int
4276 target_record_is_replaying (void)
4277 {
4278 struct target_ops *t;
4279
4280 for (t = current_target.beneath; t != NULL; t = t->beneath)
4281 if (t->to_record_is_replaying != NULL)
4282 return t->to_record_is_replaying ();
4283
4284 return 0;
4285 }
4286
4287 /* See target.h. */
4288
4289 void
4290 target_goto_record_begin (void)
4291 {
4292 struct target_ops *t;
4293
4294 for (t = current_target.beneath; t != NULL; t = t->beneath)
4295 if (t->to_goto_record_begin != NULL)
4296 {
4297 t->to_goto_record_begin ();
4298 return;
4299 }
4300
4301 tcomplain ();
4302 }
4303
4304 /* See target.h. */
4305
4306 void
4307 target_goto_record_end (void)
4308 {
4309 struct target_ops *t;
4310
4311 for (t = current_target.beneath; t != NULL; t = t->beneath)
4312 if (t->to_goto_record_end != NULL)
4313 {
4314 t->to_goto_record_end ();
4315 return;
4316 }
4317
4318 tcomplain ();
4319 }
4320
4321 /* See target.h. */
4322
4323 void
4324 target_goto_record (ULONGEST insn)
4325 {
4326 struct target_ops *t;
4327
4328 for (t = current_target.beneath; t != NULL; t = t->beneath)
4329 if (t->to_goto_record != NULL)
4330 {
4331 t->to_goto_record (insn);
4332 return;
4333 }
4334
4335 tcomplain ();
4336 }
4337
4338 /* See target.h. */
4339
4340 void
4341 target_insn_history (int size, int flags)
4342 {
4343 struct target_ops *t;
4344
4345 for (t = current_target.beneath; t != NULL; t = t->beneath)
4346 if (t->to_insn_history != NULL)
4347 {
4348 t->to_insn_history (size, flags);
4349 return;
4350 }
4351
4352 tcomplain ();
4353 }
4354
4355 /* See target.h. */
4356
4357 void
4358 target_insn_history_from (ULONGEST from, int size, int flags)
4359 {
4360 struct target_ops *t;
4361
4362 for (t = current_target.beneath; t != NULL; t = t->beneath)
4363 if (t->to_insn_history_from != NULL)
4364 {
4365 t->to_insn_history_from (from, size, flags);
4366 return;
4367 }
4368
4369 tcomplain ();
4370 }
4371
4372 /* See target.h. */
4373
4374 void
4375 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4376 {
4377 struct target_ops *t;
4378
4379 for (t = current_target.beneath; t != NULL; t = t->beneath)
4380 if (t->to_insn_history_range != NULL)
4381 {
4382 t->to_insn_history_range (begin, end, flags);
4383 return;
4384 }
4385
4386 tcomplain ();
4387 }
4388
4389 /* See target.h. */
4390
4391 void
4392 target_call_history (int size, int flags)
4393 {
4394 struct target_ops *t;
4395
4396 for (t = current_target.beneath; t != NULL; t = t->beneath)
4397 if (t->to_call_history != NULL)
4398 {
4399 t->to_call_history (size, flags);
4400 return;
4401 }
4402
4403 tcomplain ();
4404 }
4405
4406 /* See target.h. */
4407
4408 void
4409 target_call_history_from (ULONGEST begin, int size, int flags)
4410 {
4411 struct target_ops *t;
4412
4413 for (t = current_target.beneath; t != NULL; t = t->beneath)
4414 if (t->to_call_history_from != NULL)
4415 {
4416 t->to_call_history_from (begin, size, flags);
4417 return;
4418 }
4419
4420 tcomplain ();
4421 }
4422
4423 /* See target.h. */
4424
4425 void
4426 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4427 {
4428 struct target_ops *t;
4429
4430 for (t = current_target.beneath; t != NULL; t = t->beneath)
4431 if (t->to_call_history_range != NULL)
4432 {
4433 t->to_call_history_range (begin, end, flags);
4434 return;
4435 }
4436
4437 tcomplain ();
4438 }
4439
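/* The record-related wrappers above (target_stop_recording through
   target_call_history_range) back the "record" family of CLI
   commands, such as "info record", "record save", "record goto",
   "record instruction-history" and "record function-call-history".
   Targets that do not record call tcomplain, except where the
   operation is explicitly optional.  */
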
4440 static void
4441 debug_to_prepare_to_store (struct regcache *regcache)
4442 {
4443 debug_target.to_prepare_to_store (regcache);
4444
4445 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4446 }
4447
4448 static int
4449 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4450 int write, struct mem_attrib *attrib,
4451 struct target_ops *target)
4452 {
4453 int retval;
4454
4455 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4456 attrib, target);
4457
4458 fprintf_unfiltered (gdb_stdlog,
4459 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4460 paddress (target_gdbarch (), memaddr), len,
4461 write ? "write" : "read", retval);
4462
4463 if (retval > 0)
4464 {
4465 int i;
4466
4467 fputs_unfiltered (", bytes =", gdb_stdlog);
4468 for (i = 0; i < retval; i++)
4469 {
4470 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4471 {
4472 if (targetdebug < 2 && i > 0)
4473 {
4474 fprintf_unfiltered (gdb_stdlog, " ...");
4475 break;
4476 }
4477 fprintf_unfiltered (gdb_stdlog, "\n");
4478 }
4479
4480 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4481 }
4482 }
4483
4484 fputc_unfiltered ('\n', gdb_stdlog);
4485
4486 return retval;
4487 }
4488
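/* For illustration, a successful 4-byte read logged by
   deprecated_debug_xfer_memory might look roughly like this (the
   address and bytes are made up, and the line breaks in the byte dump
   depend on the host buffer's alignment):

     target_xfer_memory (0x400000, xxx, 4, read, xxx) = 4, bytes =
      55 48 89 e5
   */
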
4489 static void
4490 debug_to_files_info (struct target_ops *target)
4491 {
4492 debug_target.to_files_info (target);
4493
4494 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4495 }
4496
4497 static int
4498 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4499 struct bp_target_info *bp_tgt)
4500 {
4501 int retval;
4502
4503 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4504
4505 fprintf_unfiltered (gdb_stdlog,
4506 "target_insert_breakpoint (%s, xxx) = %ld\n",
4507 core_addr_to_string (bp_tgt->placed_address),
4508 (unsigned long) retval);
4509 return retval;
4510 }
4511
4512 static int
4513 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4514 struct bp_target_info *bp_tgt)
4515 {
4516 int retval;
4517
4518 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4519
4520 fprintf_unfiltered (gdb_stdlog,
4521 "target_remove_breakpoint (%s, xxx) = %ld\n",
4522 core_addr_to_string (bp_tgt->placed_address),
4523 (unsigned long) retval);
4524 return retval;
4525 }
4526
4527 static int
4528 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4529 {
4530 int retval;
4531
4532 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4533
4534 fprintf_unfiltered (gdb_stdlog,
4535 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4536 (unsigned long) type,
4537 (unsigned long) cnt,
4538 (unsigned long) from_tty,
4539 (unsigned long) retval);
4540 return retval;
4541 }
4542
4543 static int
4544 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4545 {
4546 CORE_ADDR retval;
4547
4548 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4549
4550 fprintf_unfiltered (gdb_stdlog,
4551 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4552 core_addr_to_string (addr), (unsigned long) len,
4553 core_addr_to_string (retval));
4554 return retval;
4555 }
4556
4557 static int
4558 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4559 struct expression *cond)
4560 {
4561 int retval;
4562
4563 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4564 rw, cond);
4565
4566 fprintf_unfiltered (gdb_stdlog,
4567 "target_can_accel_watchpoint_condition "
4568 "(%s, %d, %d, %s) = %ld\n",
4569 core_addr_to_string (addr), len, rw,
4570 host_address_to_string (cond), (unsigned long) retval);
4571 return retval;
4572 }
4573
4574 static int
4575 debug_to_stopped_by_watchpoint (void)
4576 {
4577 int retval;
4578
4579 retval = debug_target.to_stopped_by_watchpoint ();
4580
4581 fprintf_unfiltered (gdb_stdlog,
4582 "target_stopped_by_watchpoint () = %ld\n",
4583 (unsigned long) retval);
4584 return retval;
4585 }
4586
4587 static int
4588 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4589 {
4590 int retval;
4591
4592 retval = debug_target.to_stopped_data_address (target, addr);
4593
4594 fprintf_unfiltered (gdb_stdlog,
4595 "target_stopped_data_address ([%s]) = %ld\n",
4596 core_addr_to_string (*addr),
4597 (unsigned long)retval);
4598 return retval;
4599 }
4600
4601 static int
4602 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4603 CORE_ADDR addr,
4604 CORE_ADDR start, int length)
4605 {
4606 int retval;
4607
4608 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4609 start, length);
4610
4611 fprintf_unfiltered (gdb_stdlog,
4612 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4613 core_addr_to_string (addr), core_addr_to_string (start),
4614 length, retval);
4615 return retval;
4616 }
4617
4618 static int
4619 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4620 struct bp_target_info *bp_tgt)
4621 {
4622 int retval;
4623
4624 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4625
4626 fprintf_unfiltered (gdb_stdlog,
4627 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4628 core_addr_to_string (bp_tgt->placed_address),
4629 (unsigned long) retval);
4630 return retval;
4631 }
4632
4633 static int
4634 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4635 struct bp_target_info *bp_tgt)
4636 {
4637 int retval;
4638
4639 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4640
4641 fprintf_unfiltered (gdb_stdlog,
4642 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4643 core_addr_to_string (bp_tgt->placed_address),
4644 (unsigned long) retval);
4645 return retval;
4646 }
4647
4648 static int
4649 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4650 struct expression *cond)
4651 {
4652 int retval;
4653
4654 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4655
4656 fprintf_unfiltered (gdb_stdlog,
4657 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4658 core_addr_to_string (addr), len, type,
4659 host_address_to_string (cond), (unsigned long) retval);
4660 return retval;
4661 }
4662
4663 static int
4664 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4665 struct expression *cond)
4666 {
4667 int retval;
4668
4669 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4670
4671 fprintf_unfiltered (gdb_stdlog,
4672 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4673 core_addr_to_string (addr), len, type,
4674 host_address_to_string (cond), (unsigned long) retval);
4675 return retval;
4676 }
4677
4678 static void
4679 debug_to_terminal_init (void)
4680 {
4681 debug_target.to_terminal_init ();
4682
4683 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4684 }
4685
4686 static void
4687 debug_to_terminal_inferior (void)
4688 {
4689 debug_target.to_terminal_inferior ();
4690
4691 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4692 }
4693
4694 static void
4695 debug_to_terminal_ours_for_output (void)
4696 {
4697 debug_target.to_terminal_ours_for_output ();
4698
4699 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4700 }
4701
4702 static void
4703 debug_to_terminal_ours (void)
4704 {
4705 debug_target.to_terminal_ours ();
4706
4707 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4708 }
4709
4710 static void
4711 debug_to_terminal_save_ours (void)
4712 {
4713 debug_target.to_terminal_save_ours ();
4714
4715 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4716 }
4717
4718 static void
4719 debug_to_terminal_info (const char *arg, int from_tty)
4720 {
4721 debug_target.to_terminal_info (arg, from_tty);
4722
4723 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4724 from_tty);
4725 }
4726
4727 static void
4728 debug_to_load (char *args, int from_tty)
4729 {
4730 debug_target.to_load (args, from_tty);
4731
4732 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4733 }
4734
4735 static void
4736 debug_to_post_startup_inferior (ptid_t ptid)
4737 {
4738 debug_target.to_post_startup_inferior (ptid);
4739
4740 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4741 PIDGET (ptid));
4742 }
4743
4744 static int
4745 debug_to_insert_fork_catchpoint (int pid)
4746 {
4747 int retval;
4748
4749 retval = debug_target.to_insert_fork_catchpoint (pid);
4750
4751 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4752 pid, retval);
4753
4754 return retval;
4755 }
4756
4757 static int
4758 debug_to_remove_fork_catchpoint (int pid)
4759 {
4760 int retval;
4761
4762 retval = debug_target.to_remove_fork_catchpoint (pid);
4763
4764 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4765 pid, retval);
4766
4767 return retval;
4768 }
4769
4770 static int
4771 debug_to_insert_vfork_catchpoint (int pid)
4772 {
4773 int retval;
4774
4775 retval = debug_target.to_insert_vfork_catchpoint (pid);
4776
4777 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4778 pid, retval);
4779
4780 return retval;
4781 }
4782
4783 static int
4784 debug_to_remove_vfork_catchpoint (int pid)
4785 {
4786 int retval;
4787
4788 retval = debug_target.to_remove_vfork_catchpoint (pid);
4789
4790 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4791 pid, retval);
4792
4793 return retval;
4794 }
4795
4796 static int
4797 debug_to_insert_exec_catchpoint (int pid)
4798 {
4799 int retval;
4800
4801 retval = debug_target.to_insert_exec_catchpoint (pid);
4802
4803 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4804 pid, retval);
4805
4806 return retval;
4807 }
4808
4809 static int
4810 debug_to_remove_exec_catchpoint (int pid)
4811 {
4812 int retval;
4813
4814 retval = debug_target.to_remove_exec_catchpoint (pid);
4815
4816 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4817 pid, retval);
4818
4819 return retval;
4820 }
4821
4822 static int
4823 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4824 {
4825 int has_exited;
4826
4827 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4828
4829 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4830 pid, wait_status, *exit_status, has_exited);
4831
4832 return has_exited;
4833 }
4834
4835 static int
4836 debug_to_can_run (void)
4837 {
4838 int retval;
4839
4840 retval = debug_target.to_can_run ();
4841
4842 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4843
4844 return retval;
4845 }
4846
4847 static struct gdbarch *
4848 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4849 {
4850 struct gdbarch *retval;
4851
4852 retval = debug_target.to_thread_architecture (ops, ptid);
4853
4854 fprintf_unfiltered (gdb_stdlog,
4855 "target_thread_architecture (%s) = %s [%s]\n",
4856 target_pid_to_str (ptid),
4857 host_address_to_string (retval),
4858 gdbarch_bfd_arch_info (retval)->printable_name);
4859 return retval;
4860 }
4861
4862 static void
4863 debug_to_stop (ptid_t ptid)
4864 {
4865 debug_target.to_stop (ptid);
4866
4867 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4868 target_pid_to_str (ptid));
4869 }
4870
4871 static void
4872 debug_to_rcmd (char *command,
4873 struct ui_file *outbuf)
4874 {
4875 debug_target.to_rcmd (command, outbuf);
4876 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4877 }
4878
4879 static char *
4880 debug_to_pid_to_exec_file (int pid)
4881 {
4882 char *exec_file;
4883
4884 exec_file = debug_target.to_pid_to_exec_file (pid);
4885
4886 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4887 pid, exec_file);
4888
4889 return exec_file;
4890 }
4891
4892 static void
4893 setup_target_debug (void)
4894 {
4895 memcpy (&debug_target, &current_target, sizeof debug_target);
4896
4897 current_target.to_open = debug_to_open;
4898 current_target.to_post_attach = debug_to_post_attach;
4899 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4900 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4901 current_target.to_files_info = debug_to_files_info;
4902 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4903 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4904 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4905 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4906 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4907 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4908 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4909 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4910 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4911 current_target.to_watchpoint_addr_within_range
4912 = debug_to_watchpoint_addr_within_range;
4913 current_target.to_region_ok_for_hw_watchpoint
4914 = debug_to_region_ok_for_hw_watchpoint;
4915 current_target.to_can_accel_watchpoint_condition
4916 = debug_to_can_accel_watchpoint_condition;
4917 current_target.to_terminal_init = debug_to_terminal_init;
4918 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4919 current_target.to_terminal_ours_for_output
4920 = debug_to_terminal_ours_for_output;
4921 current_target.to_terminal_ours = debug_to_terminal_ours;
4922 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4923 current_target.to_terminal_info = debug_to_terminal_info;
4924 current_target.to_load = debug_to_load;
4925 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4926 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4927 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4928 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4929 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4930 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4931 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4932 current_target.to_has_exited = debug_to_has_exited;
4933 current_target.to_can_run = debug_to_can_run;
4934 current_target.to_stop = debug_to_stop;
4935 current_target.to_rcmd = debug_to_rcmd;
4936 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4937 current_target.to_thread_architecture = debug_to_thread_architecture;
4938 }
4939 \f
4940
4941 static char targ_desc[] =
4942 "Names of targets and files being debugged.\nShows the entire \
4943 stack of targets currently in use (including the exec-file,\n\
4944 core-file, and process, if any), as well as the symbol file name.";
4945
4946 static void
4947 do_monitor_command (char *cmd,
4948 int from_tty)
4949 {
4950 if ((current_target.to_rcmd
4951 == (void (*) (char *, struct ui_file *)) tcomplain)
4952 || (current_target.to_rcmd == debug_to_rcmd
4953 && (debug_target.to_rcmd
4954 == (void (*) (char *, struct ui_file *)) tcomplain)))
4955 error (_("\"monitor\" command not supported by this target."));
4956 target_rcmd (cmd, gdb_stdtarg);
4957 }
4958
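/* Example (illustrative): with a remote stub that implements the
   qRcmd packet, "monitor" forwards the command text verbatim and
   prints whatever the stub sends back, e.g.:

     (gdb) monitor help

   If neither the current target nor the debug-wrapped target provides
   to_rcmd, the error above is raised instead.  */
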
4959 /* Print the name of each layer of our target stack.  */
4960
4961 static void
4962 maintenance_print_target_stack (char *cmd, int from_tty)
4963 {
4964 struct target_ops *t;
4965
4966 printf_filtered (_("The current target stack is:\n"));
4967
4968 for (t = target_stack; t != NULL; t = t->beneath)
4969 {
4970 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4971 }
4972 }
4973
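/* Example output (illustrative; the layers shown depend on how GDB
   was started and which targets are currently pushed):

     (gdb) maintenance print target-stack
     The current target stack is:
       - record (Process record and replay target)
       - child (Unix child process)
       - exec (Local exec file)
       - None (None)
   */
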
4974 /* Controls if async mode is permitted. */
4975 int target_async_permitted = 0;
4976
4977 /* The set command writes to this variable.  If the inferior is
4978    executing, target_async_permitted is *not* updated. */
4979 static int target_async_permitted_1 = 0;
4980
4981 static void
4982 set_target_async_command (char *args, int from_tty,
4983 struct cmd_list_element *c)
4984 {
4985 if (have_live_inferiors ())
4986 {
4987 target_async_permitted_1 = target_async_permitted;
4988 error (_("Cannot change this setting while the inferior is running."));
4989 }
4990
4991 target_async_permitted = target_async_permitted_1;
4992 }
4993
4994 static void
4995 show_target_async_command (struct ui_file *file, int from_tty,
4996 struct cmd_list_element *c,
4997 const char *value)
4998 {
4999 fprintf_filtered (file,
5000 _("Controlling the inferior in "
5001 "asynchronous mode is %s.\n"), value);
5002 }
5003
5004 /* Temporary copies of permission settings. */
5005
5006 static int may_write_registers_1 = 1;
5007 static int may_write_memory_1 = 1;
5008 static int may_insert_breakpoints_1 = 1;
5009 static int may_insert_tracepoints_1 = 1;
5010 static int may_insert_fast_tracepoints_1 = 1;
5011 static int may_stop_1 = 1;
5012
5013 /* Make the user-set values match the real values again. */
5014
5015 void
5016 update_target_permissions (void)
5017 {
5018 may_write_registers_1 = may_write_registers;
5019 may_write_memory_1 = may_write_memory;
5020 may_insert_breakpoints_1 = may_insert_breakpoints;
5021 may_insert_tracepoints_1 = may_insert_tracepoints;
5022 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5023 may_stop_1 = may_stop;
5024 }
5025
5026 /* This single function handles (most of) the permission flags in the
5027    same way.  */
5028
5029 static void
5030 set_target_permissions (char *args, int from_tty,
5031 struct cmd_list_element *c)
5032 {
5033 if (target_has_execution)
5034 {
5035 update_target_permissions ();
5036 error (_("Cannot change this setting while the inferior is running."));
5037 }
5038
5039 /* Make the real values match the user-changed values. */
5040 may_write_registers = may_write_registers_1;
5041 may_insert_breakpoints = may_insert_breakpoints_1;
5042 may_insert_tracepoints = may_insert_tracepoints_1;
5043 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5044 may_stop = may_stop_1;
5045 update_observer_mode ();
5046 }
5047
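/* Example session (illustrative) showing one of these permission
   flags in action; the error text comes from target_store_registers
   above, and the register number depends on the architecture:

     (gdb) set may-write-registers off
     (gdb) set $pc = 0x1000
     Writing to registers is not allowed (regno 16)
   */
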
5048 /* Set memory write permission independently of observer mode. */
5049
5050 static void
5051 set_write_memory_permission (char *args, int from_tty,
5052 struct cmd_list_element *c)
5053 {
5054 /* Make the real values match the user-changed values. */
5055 may_write_memory = may_write_memory_1;
5056 update_observer_mode ();
5057 }
5058
5059
5060 void
5061 initialize_targets (void)
5062 {
5063 init_dummy_target ();
5064 push_target (&dummy_target);
5065
5066 add_info ("target", target_info, targ_desc);
5067 add_info ("files", target_info, targ_desc);
5068
5069 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5070 Set target debugging."), _("\
5071 Show target debugging."), _("\
5072 When non-zero, target debugging is enabled. Higher numbers are more\n\
5073 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5074 command."),
5075 NULL,
5076 show_targetdebug,
5077 &setdebuglist, &showdebuglist);
5078
5079 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5080 &trust_readonly, _("\
5081 Set mode for reading from readonly sections."), _("\
5082 Show mode for reading from readonly sections."), _("\
5083 When this mode is on, memory reads from readonly sections (such as .text)\n\
5084 will be read from the object file instead of from the target. This will\n\
5085 result in significant performance improvement for remote targets."),
5086 NULL,
5087 show_trust_readonly,
5088 &setlist, &showlist);
5089
5090 add_com ("monitor", class_obscure, do_monitor_command,
5091 _("Send a command to the remote monitor (remote targets only)."));
5092
5093 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5094 _("Print the name of each layer of the internal target stack."),
5095 &maintenanceprintlist);
5096
5097 add_setshow_boolean_cmd ("target-async", no_class,
5098 &target_async_permitted_1, _("\
5099 Set whether gdb controls the inferior in asynchronous mode."), _("\
5100 Show whether gdb controls the inferior in asynchronous mode."), _("\
5101 Tells gdb whether to control the inferior in asynchronous mode."),
5102 set_target_async_command,
5103 show_target_async_command,
5104 &setlist,
5105 &showlist);
5106
5107 add_setshow_boolean_cmd ("stack-cache", class_support,
5108 &stack_cache_enabled_p_1, _("\
5109 Set cache use for stack access."), _("\
5110 Show cache use for stack access."), _("\
5111 When on, use the data cache for all stack access, regardless of any\n\
5112 configured memory regions. This improves remote performance significantly.\n\
5113 By default, caching for stack access is on."),
5114 set_stack_cache_enabled_p,
5115 show_stack_cache_enabled_p,
5116 &setlist, &showlist);
5117
5118 add_setshow_boolean_cmd ("may-write-registers", class_support,
5119 &may_write_registers_1, _("\
5120 Set permission to write into registers."), _("\
5121 Show permission to write into registers."), _("\
5122 When this permission is on, GDB may write into the target's registers.\n\
5123 Otherwise, any sort of write attempt will result in an error."),
5124 set_target_permissions, NULL,
5125 &setlist, &showlist);
5126
5127 add_setshow_boolean_cmd ("may-write-memory", class_support,
5128 &may_write_memory_1, _("\
5129 Set permission to write into target memory."), _("\
5130 Show permission to write into target memory."), _("\
5131 When this permission is on, GDB may write into the target's memory.\n\
5132 Otherwise, any sort of write attempt will result in an error."),
5133 set_write_memory_permission, NULL,
5134 &setlist, &showlist);
5135
5136 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5137 &may_insert_breakpoints_1, _("\
5138 Set permission to insert breakpoints in the target."), _("\
5139 Show permission to insert breakpoints in the target."), _("\
5140 When this permission is on, GDB may insert breakpoints in the program.\n\
5141 Otherwise, any sort of insertion attempt will result in an error."),
5142 set_target_permissions, NULL,
5143 &setlist, &showlist);
5144
5145 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5146 &may_insert_tracepoints_1, _("\
5147 Set permission to insert tracepoints in the target."), _("\
5148 Show permission to insert tracepoints in the target."), _("\
5149 When this permission is on, GDB may insert tracepoints in the program.\n\
5150 Otherwise, any sort of insertion attempt will result in an error."),
5151 set_target_permissions, NULL,
5152 &setlist, &showlist);
5153
5154 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5155 &may_insert_fast_tracepoints_1, _("\
5156 Set permission to insert fast tracepoints in the target."), _("\
5157 Show permission to insert fast tracepoints in the target."), _("\
5158 When this permission is on, GDB may insert fast tracepoints.\n\
5159 Otherwise, any sort of insertion attempt will result in an error."),
5160 set_target_permissions, NULL,
5161 &setlist, &showlist);
5162
5163 add_setshow_boolean_cmd ("may-interrupt", class_support,
5164 &may_stop_1, _("\
5165 Set permission to interrupt or signal the target."), _("\
5166 Show permission to interrupt or signal the target."), _("\
5167 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5168 Otherwise, any attempt to interrupt or stop will be ignored."),
5169 set_target_permissions, NULL,
5170 &setlist, &showlist);
5171
5172
5173 target_dcache = dcache_init ();
5174 }