/* gdb/target.c -- from the binutils-gdb repository (web-viewer header
   removed).  */
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static int return_minus_one (void);
70
71 static void *return_null (void);
72
73 void target_ignore (void);
74
75 static void target_command (char *, int);
76
77 static struct target_ops *find_default_run_target (char *);
78
79 static target_xfer_partial_ftype default_xfer_partial;
80
81 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
82 ptid_t ptid);
83
84 static int dummy_find_memory_regions (struct target_ops *self,
85 find_memory_region_ftype ignore1,
86 void *ignore2);
87
88 static char *dummy_make_corefile_notes (struct target_ops *self,
89 bfd *ignore1, int *ignore2);
90
91 static int find_default_can_async_p (struct target_ops *ignore);
92
93 static int find_default_is_async_p (struct target_ops *ignore);
94
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops *self);
97
98 #include "target-delegates.c"
99
100 static void init_dummy_target (void);
101
102 static struct target_ops debug_target;
103
104 static void debug_to_open (char *, int);
105
106 static void debug_to_prepare_to_store (struct target_ops *self,
107 struct regcache *);
108
109 static void debug_to_files_info (struct target_ops *);
110
111 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
118 int, int, int);
119
120 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
121 struct gdbarch *,
122 struct bp_target_info *);
123
124 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
125 struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_insert_watchpoint (struct target_ops *self,
129 CORE_ADDR, int, int,
130 struct expression *);
131
132 static int debug_to_remove_watchpoint (struct target_ops *self,
133 CORE_ADDR, int, int,
134 struct expression *);
135
136 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
137
138 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
139 CORE_ADDR, CORE_ADDR, int);
140
141 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
142 CORE_ADDR, int);
143
144 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
145 CORE_ADDR, int, int,
146 struct expression *);
147
148 static void debug_to_terminal_init (struct target_ops *self);
149
150 static void debug_to_terminal_inferior (struct target_ops *self);
151
152 static void debug_to_terminal_ours_for_output (struct target_ops *self);
153
154 static void debug_to_terminal_save_ours (struct target_ops *self);
155
156 static void debug_to_terminal_ours (struct target_ops *self);
157
158 static void debug_to_load (struct target_ops *self, char *, int);
159
160 static int debug_to_can_run (struct target_ops *self);
161
162 static void debug_to_stop (struct target_ops *self, ptid_t);
163
164 /* Pointer to array of target architecture structures; the size of the
165 array; the current index into the array; the allocated size of the
166 array. */
167 struct target_ops **target_structs;
168 unsigned target_struct_size;
169 unsigned target_struct_allocsize;
170 #define DEFAULT_ALLOCSIZE 10
171
172 /* The initial current target, so that there is always a semi-valid
173 current target. */
174
175 static struct target_ops dummy_target;
176
177 /* Top of target stack. */
178
179 static struct target_ops *target_stack;
180
181 /* The target structure we are currently using to talk to a process
182 or file or whatever "inferior" we have. */
183
184 struct target_ops current_target;
185
186 /* Command list for target. */
187
188 static struct cmd_list_element *targetlist = NULL;
189
190 /* Nonzero if we should trust readonly sections from the
191 executable when reading memory. */
192
193 static int trust_readonly = 0;
194
195 /* Nonzero if we should show true memory content including
196 memory breakpoint inserted by gdb. */
197
198 static int show_memory_breakpoints = 0;
199
200 /* These globals control whether GDB attempts to perform these
201 operations; they are useful for targets that need to prevent
202 inadvertant disruption, such as in non-stop mode. */
203
204 int may_write_registers = 1;
205
206 int may_write_memory = 1;
207
208 int may_insert_breakpoints = 1;
209
210 int may_insert_tracepoints = 1;
211
212 int may_insert_fast_tracepoints = 1;
213
214 int may_stop = 1;
215
216 /* Non-zero if we want to see trace of target level stuff. */
217
218 static unsigned int targetdebug = 0;
/* Implement "show debug target": report the current value of the
   "set debug target" setting to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
225
226 static void setup_target_debug (void);
227
228 /* The user just typed 'target' without the name of a target. */
229
static void
target_command (char *arg, int from_tty)
{
  /* "target" with no target name is not a valid command; point the
     user at the help text for the prefix.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
236
237 /* Default target_has_* methods for process_stratum targets. */
238
239 int
240 default_child_has_all_memory (struct target_ops *ops)
241 {
242 /* If no inferior selected, then we can't read memory here. */
243 if (ptid_equal (inferior_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249 int
250 default_child_has_memory (struct target_ops *ops)
251 {
252 /* If no inferior selected, then we can't read memory here. */
253 if (ptid_equal (inferior_ptid, null_ptid))
254 return 0;
255
256 return 1;
257 }
258
259 int
260 default_child_has_stack (struct target_ops *ops)
261 {
262 /* If no inferior selected, there's no stack. */
263 if (ptid_equal (inferior_ptid, null_ptid))
264 return 0;
265
266 return 1;
267 }
268
269 int
270 default_child_has_registers (struct target_ops *ops)
271 {
272 /* Can't read registers from no inferior. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
281 {
282 /* If there's no thread selected, then we can't make it run through
283 hoops. */
284 if (ptid_equal (the_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290
291 int
292 target_has_all_memory_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_all_memory (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_memory_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_memory (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_stack_1 (void)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_stack (t))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_registers_1 (void)
329 {
330 struct target_ops *t;
331
332 for (t = current_target.beneath; t != NULL; t = t->beneath)
333 if (t->to_has_registers (t))
334 return 1;
335
336 return 0;
337 }
338
339 int
340 target_has_execution_1 (ptid_t the_ptid)
341 {
342 struct target_ops *t;
343
344 for (t = current_target.beneath; t != NULL; t = t->beneath)
345 if (t->to_has_execution (t, the_ptid))
346 return 1;
347
348 return 0;
349 }
350
/* Convenience wrapper: check whether the currently selected thread
   (inferior_ptid) is executing.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
356
357 /* Complete initialization of T. This ensures that various fields in
358 T are set, if needed by the target implementation. */
359
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* The to_has_* predicates default to "no", so a target that does
     not override them contributes nothing at its stratum.  The casts
     adapt return_zero's signature to each slot's prototype.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill any remaining unset method slots with delegators (generated
     in target-delegates.c).  */
  install_delegators (t);
}
384
385 /* Add possible target architecture T to the list and add a new
386 command 'target T->to_shortname'. Set COMPLETER as the command's
387 completer if not NULL. */
388
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Fill in any missing "must have" methods before registering T.  */
  complete_target_initialization (t);

  /* Grow the registry of known targets; start at DEFAULT_ALLOCSIZE
     and double when full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Create the "target" prefix command the first time a target is
     registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
425
426 /* Add a possible target architecture to the list. */
427
void
add_target (struct target_ops *t)
{
  /* Register T without a command-line completer.  */
  add_target_with_completer (t, NULL);
}
433
434 /* See target.h. */
435
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  /* NOTE(review): ALT is not freed here -- presumably deprecate_cmd
     retains the string for the lifetime of the command; confirm
     against deprecate_cmd's contract.  */
  alt = xstrprintf ("target %s", t->to_shortname);
  deprecate_cmd (c, alt);
}
448
449 /* Stub functions */
450
/* Do-nothing stub, installed as the default for target methods whose
   absence can simply be ignored.  */

void
target_ignore (void)
{
}
455
456 void
457 target_kill (void)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 if (t->to_kill != NULL)
463 {
464 if (targetdebug)
465 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
466
467 t->to_kill (t);
468 return;
469 }
470
471 noprocess ();
472 }
473
void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so drop any cached data first.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
480
481 void
482 target_create_inferior (char *exec_file, char *args,
483 char **env, int from_tty)
484 {
485 struct target_ops *t;
486
487 for (t = current_target.beneath; t != NULL; t = t->beneath)
488 {
489 if (t->to_create_inferior != NULL)
490 {
491 t->to_create_inferior (t, exec_file, args, env, from_tty);
492 if (targetdebug)
493 fprintf_unfiltered (gdb_stdlog,
494 "target_create_inferior (%s, %s, xxx, %d)\n",
495 exec_file, args, from_tty);
496 return;
497 }
498 }
499
500 internal_error (__FILE__, __LINE__,
501 _("could not find a target to create inferior"));
502 }
503
504 void
505 target_terminal_inferior (void)
506 {
507 /* A background resume (``run&'') should leave GDB in control of the
508 terminal. Use target_can_async_p, not target_is_async_p, since at
509 this point the target is not async yet. However, if sync_execution
510 is not set, we know it will become async prior to resume. */
511 if (target_can_async_p () && !sync_execution)
512 return;
513
514 /* If GDB is resuming the inferior in the foreground, install
515 inferior's terminal modes. */
516 (*current_target.to_terminal_inferior) (&current_target);
517 }
518
/* Default memory-transfer routine for targets with no memory at all:
   fail every request with EIO and report zero bytes handled.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
526
/* Error out because the current target cannot perform the requested
   operation.  Installed as the default for many target methods.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
533
/* Error out because the requested operation requires a live process.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
539
/* Default to_terminal_info method: there is no saved terminal state
   to describe.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
545
546 /* A default implementation for the to_get_ada_task_ptid target method.
547
548 This function builds the PTID by using both LWP and TID as part of
549 the PTID lwp and tid elements. The pid used is the pid of the
550 inferior_ptid. */
551
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the current inferior's pid; only the lwp/tid parts vary.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
557
558 static enum exec_direction_kind
559 default_execution_direction (struct target_ops *self)
560 {
561 if (!target_can_execute_reverse)
562 return EXEC_FORWARD;
563 else if (!target_can_async_p ())
564 return EXEC_FORWARD;
565 else
566 gdb_assert_not_reached ("\
567 to_execution_direction must be implemented for reverse async");
568 }
569
570 /* Go through the target stack from top to bottom, copying over zero
571 entries in current_target, then filling in still empty entries. In
572 effect, we are doing class inheritance through the pushed target
573 vectors.
574
575 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
576 is currently implemented, is that it discards any knowledge of
577 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
579 locally search the target stack for the target that can handle the
580 request. */
581
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* INHERIT copies T's slot only when current_target's slot is still
     empty, so the topmost stratum providing a method wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  /* to_read_description has no default; a NULL value is meaningful to
     callers, so force it back to NULL in case a stratum supplied one.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
843
844 /* Push a new target type into the stack of the existing target accessors,
845 possibly superseding some of the existing accessors.
846
847 Rather than allow an empty stack, we always have the dummy target at
848 the bottom stratum, so we can call the function vectors without
849 checking them. */
850
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: walk down the
     stack until we hit the first target at or below T's stratum.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-flatten current_target so T's methods take effect.  */
  update_current_target ();
}
894
895 /* Remove a target_ops vector from the stack, wherever it may be.
896 Return how many times it was removed (0 or 1). */
897
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target sits permanently at the bottom of the stack and
     may never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
936
/* Unpush targets from the top of the stack until only strata at or
   below ABOVE_STRATUM remain.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  /* unpush_target failing here means the stack and
	     current_target disagree -- an internal inconsistency.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
953
/* Unpush every target except the bottom (dummy) one.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
959
960 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
961
962 int
963 target_is_pushed (struct target_ops *t)
964 {
965 struct target_ops **cur;
966
967 /* Check magic number. If wrong, it probably means someone changed
968 the struct definition, but not all the places that initialize one. */
969 if (t->to_magic != OPS_MAGIC)
970 {
971 fprintf_unfiltered (gdb_stderr,
972 "Magic number of %s target struct wrong\n",
973 t->to_shortname);
974 internal_error (__FILE__, __LINE__,
975 _("failed internal consistency check"));
976 }
977
978 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
979 if (*cur == t)
980 return 1;
981
982 return 0;
983 }
984
985 /* Using the objfile specified in OBJFILE, find the address for the
986 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile because it is assigned inside TRY_CATCH (setjmp-based
     exception handling) and read afterwards.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first stratum that knows how to resolve TLS addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific failure; re-throw for an outer
		 handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1081
1082 const char *
1083 target_xfer_status_to_string (enum target_xfer_status err)
1084 {
1085 #define CASE(X) case X: return #X
1086 switch (err)
1087 {
1088 CASE(TARGET_XFER_E_IO);
1089 CASE(TARGET_XFER_E_UNAVAILABLE);
1090 default:
1091 return "<unknown>";
1092 }
1093 #undef CASE
1094 };
1095
1096
1097 #undef MIN
1098 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1099
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  The count includes the terminating null byte when one is found;
   on a read error, the bytes read so far are still returned in *STRING
   (the buffer is not null-terminated in that case).  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read whole aligned 4-byte words and pick out the bytes we
	 need; TLEN is how many of this word's bytes are wanted,
	 OFFSET where they start within the word.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if this word won't fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping (and counting the terminator) if we
	 hit the end of the string.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Ownership of BUFFER transfers to the caller, even on error.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1172
1173 struct target_section_table *
1174 target_get_section_table (struct target_ops *target)
1175 {
1176 struct target_ops *t;
1177
1178 if (targetdebug)
1179 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1180
1181 for (t = target; t != NULL; t = t->beneath)
1182 if (t->to_get_section_table != NULL)
1183 return (*t->to_get_section_table) (t);
1184
1185 return NULL;
1186 }
1187
1188 /* Find a section containing ADDR. */
1189
1190 struct target_section *
1191 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1192 {
1193 struct target_section_table *table = target_get_section_table (target);
1194 struct target_section *secp;
1195
1196 if (table == NULL)
1197 return NULL;
1198
1199 for (secp = table->sections; secp < table->sections_end; secp++)
1200 {
1201 if (addr >= secp->addr && addr < secp->endaddr)
1202 return secp;
1203 }
1204 return NULL;
1205 }
1206
1207 /* Read memory from the live target, even if currently inspecting a
1208 traceframe. The return is the same as that of target_read. */
1209
1210 static enum target_xfer_status
1211 target_read_live_memory (enum target_object object,
1212 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1213 ULONGEST *xfered_len)
1214 {
1215 enum target_xfer_status ret;
1216 struct cleanup *cleanup;
1217
1218 /* Switch momentarily out of tfind mode so to access live memory.
1219 Note that this must not clear global state, such as the frame
1220 cache, which must still remain valid for the previous traceframe.
1221 We may be _building_ the frame cache at this point. */
1222 cleanup = make_cleanup_restore_traceframe_number ();
1223 set_traceframe_number (-1);
1224
1225 ret = target_xfer_partial (current_target.beneath, object, NULL,
1226 myaddr, NULL, memaddr, len, xfered_len);
1227
1228 do_cleanups (cleanup);
1229 return ret;
1230 }
1231
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a section flagged SEC_READONLY;
     otherwise live memory may differ from the traceframe's view.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.
		     The caller retries for the remainder.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* No read-only section covers MEMADDR.  */
  return TARGET_XFER_EOF;
}
1287
1288 /* Read memory from more than one valid target. A core file, for
1289 instance, could have some of memory but delegate other bits to
1290 the target below it. So, we must manually try all targets. */
1291
1292 static enum target_xfer_status
1293 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1294 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1295 ULONGEST *xfered_len)
1296 {
1297 enum target_xfer_status res;
1298
1299 do
1300 {
1301 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1302 readbuf, writebuf, memaddr, len,
1303 xfered_len);
1304 if (res == TARGET_XFER_OK)
1305 break;
1306
1307 /* Stop if the target reports that the memory is not available. */
1308 if (res == TARGET_XFER_E_UNAVAILABLE)
1309 break;
1310
1311 /* We want to continue past core files to executables, but not
1312 past a running target's memory. */
1313 if (ops->to_has_all_memory (ops))
1314 break;
1315
1316 ops = ops->beneath;
1317 }
1318 while (ops != NULL);
1319
1320 return res;
1321 }
1322
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped-overlay file reads, trusted read-only
   sections, traceframe-aware live reads, the dcache, and finally the
   raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* NOTE(review): reg_len is int while LEN is ULONGEST -- a request
     larger than INT_MAX would truncate.  TODO confirm callers bound
     LEN appropriately.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request so it ends where the available
		     range begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1533
1534 /* Perform a partial memory transfer. For docs see target.h,
1535 to_xfer_partial. */
1536
1537 static enum target_xfer_status
1538 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1539 gdb_byte *readbuf, const gdb_byte *writebuf,
1540 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1541 {
1542 enum target_xfer_status res;
1543
1544 /* Zero length requests are ok and require no work. */
1545 if (len == 0)
1546 return TARGET_XFER_EOF;
1547
1548 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1549 breakpoint insns, thus hiding out from higher layers whether
1550 there are software breakpoints inserted in the code stream. */
1551 if (readbuf != NULL)
1552 {
1553 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1554 xfered_len);
1555
1556 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1557 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1558 }
1559 else
1560 {
1561 void *buf;
1562 struct cleanup *old_chain;
1563
1564 /* A large write request is likely to be partially satisfied
1565 by memory_xfer_partial_1. We will continually malloc
1566 and free a copy of the entire write request for breakpoint
1567 shadow handling even though we only end up writing a small
1568 subset of it. Cap writes to 4KB to mitigate this. */
1569 len = min (4096, len);
1570
1571 buf = xmalloc (len);
1572 old_chain = make_cleanup (xfree, buf);
1573 memcpy (buf, writebuf, len);
1574
1575 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1576 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1577 xfered_len);
1578
1579 do_cleanups (old_chain);
1580 }
1581
1582 return res;
1583 }
1584
1585 static void
1586 restore_show_memory_breakpoints (void *arg)
1587 {
1588 show_memory_breakpoints = (uintptr_t) arg;
1589 }
1590
1591 struct cleanup *
1592 make_show_memory_breakpoints_cleanup (int show)
1593 {
1594 int current = show_memory_breakpoints;
1595
1596 show_memory_breakpoints = show;
1597 return make_cleanup (restore_show_memory_breakpoints,
1598 (void *) (uintptr_t) current);
1599 }
1600
/* For docs see target.h, to_xfer_partial.

   Central dispatch for all partial transfers: routes memory-flavored
   objects through the memory-specific paths, everything else straight
   to OPS's to_xfer_partial, and optionally dumps the transfer when
   "set debug target" is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  /* Hex-dump the transferred bytes, 16 per line; unless
	     "set debug target 2" or higher, stop after one line.
	     NOTE(review): I is int compared against *XFERED_LEN
	     (ULONGEST) -- fine for debug-sized transfers, but worth
	     confirming for very large ones.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1693
1694 /* Read LEN bytes of target memory at address MEMADDR, placing the
1695 results in GDB's memory at MYADDR. Returns either 0 for success or
1696 TARGET_XFER_E_IO if any error occurs.
1697
1698 If an error occurs, no guarantee is made about the contents of the data at
1699 MYADDR. In particular, the caller should not depend upon partial reads
1700 filling the buffer with good data. There is no way for the caller to know
1701 how much good data might have been transfered anyway. Callers that can
1702 deal with partial reads should call target_read (which will retry until
1703 it makes no progress, and then return how much was transferred). */
1704
1705 int
1706 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1707 {
1708 /* Dispatch to the topmost target, not the flattened current_target.
1709 Memory accesses check target->to_has_(all_)memory, and the
1710 flattened target doesn't inherit those. */
1711 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1712 myaddr, memaddr, len) == len)
1713 return 0;
1714 else
1715 return TARGET_XFER_E_IO;
1716 }
1717
1718 /* Like target_read_memory, but specify explicitly that this is a read
1719 from the target's raw memory. That is, this read bypasses the
1720 dcache, breakpoint shadowing, etc. */
1721
1722 int
1723 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1724 {
1725 /* See comment in target_read_memory about why the request starts at
1726 current_target.beneath. */
1727 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1728 myaddr, memaddr, len) == len)
1729 return 0;
1730 else
1731 return TARGET_XFER_E_IO;
1732 }
1733
1734 /* Like target_read_memory, but specify explicitly that this is a read from
1735 the target's stack. This may trigger different cache behavior. */
1736
1737 int
1738 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1739 {
1740 /* See comment in target_read_memory about why the request starts at
1741 current_target.beneath. */
1742 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1743 myaddr, memaddr, len) == len)
1744 return 0;
1745 else
1746 return TARGET_XFER_E_IO;
1747 }
1748
1749 /* Like target_read_memory, but specify explicitly that this is a read from
1750 the target's code. This may trigger different cache behavior. */
1751
1752 int
1753 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1754 {
1755 /* See comment in target_read_memory about why the request starts at
1756 current_target.beneath. */
1757 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1758 myaddr, memaddr, len) == len)
1759 return 0;
1760 else
1761 return TARGET_XFER_E_IO;
1762 }
1763
1764 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1765 Returns either 0 for success or TARGET_XFER_E_IO if any
1766 error occurs. If an error occurs, no guarantee is made about how
1767 much data got written. Callers that can deal with partial writes
1768 should call target_write. */
1769
1770 int
1771 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1772 {
1773 /* See comment in target_read_memory about why the request starts at
1774 current_target.beneath. */
1775 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1776 myaddr, memaddr, len) == len)
1777 return 0;
1778 else
1779 return TARGET_XFER_E_IO;
1780 }
1781
1782 /* Write LEN bytes from MYADDR to target raw memory at address
1783 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1784 if any error occurs. If an error occurs, no guarantee is made
1785 about how much data got written. Callers that can deal with
1786 partial writes should call target_write. */
1787
1788 int
1789 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1790 {
1791 /* See comment in target_read_memory about why the request starts at
1792 current_target.beneath. */
1793 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1794 myaddr, memaddr, len) == len)
1795 return 0;
1796 else
1797 return TARGET_XFER_E_IO;
1798 }
1799
1800 /* Fetch the target's memory map. */
1801
1802 VEC(mem_region_s) *
1803 target_memory_map (void)
1804 {
1805 VEC(mem_region_s) *result;
1806 struct mem_region *last_one, *this_one;
1807 int ix;
1808 struct target_ops *t;
1809
1810 if (targetdebug)
1811 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1812
1813 for (t = current_target.beneath; t != NULL; t = t->beneath)
1814 if (t->to_memory_map != NULL)
1815 break;
1816
1817 if (t == NULL)
1818 return NULL;
1819
1820 result = t->to_memory_map (t);
1821 if (result == NULL)
1822 return NULL;
1823
1824 qsort (VEC_address (mem_region_s, result),
1825 VEC_length (mem_region_s, result),
1826 sizeof (struct mem_region), mem_region_cmp);
1827
1828 /* Check that regions do not overlap. Simultaneously assign
1829 a numbering for the "mem" commands to use to refer to
1830 each region. */
1831 last_one = NULL;
1832 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1833 {
1834 this_one->number = ix;
1835
1836 if (last_one && last_one->hi > this_one->lo)
1837 {
1838 warning (_("Overlapping regions in memory map: ignoring"));
1839 VEC_free (mem_region_s, result);
1840 return NULL;
1841 }
1842 last_one = this_one;
1843 }
1844
1845 return result;
1846 }
1847
1848 void
1849 target_flash_erase (ULONGEST address, LONGEST length)
1850 {
1851 struct target_ops *t;
1852
1853 for (t = current_target.beneath; t != NULL; t = t->beneath)
1854 if (t->to_flash_erase != NULL)
1855 {
1856 if (targetdebug)
1857 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1858 hex_string (address), phex (length, 0));
1859 t->to_flash_erase (t, address, length);
1860 return;
1861 }
1862
1863 tcomplain ();
1864 }
1865
1866 void
1867 target_flash_done (void)
1868 {
1869 struct target_ops *t;
1870
1871 for (t = current_target.beneath; t != NULL; t = t->beneath)
1872 if (t->to_flash_done != NULL)
1873 {
1874 if (targetdebug)
1875 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1876 t->to_flash_done (t);
1877 return;
1878 }
1879
1880 tcomplain ();
1881 }
1882
/* "show" callback for the "trust-readonly-sections" setting; prints
   the current VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1891
1892 /* More generic transfers. */
1893
1894 static enum target_xfer_status
1895 default_xfer_partial (struct target_ops *ops, enum target_object object,
1896 const char *annex, gdb_byte *readbuf,
1897 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1898 ULONGEST *xfered_len)
1899 {
1900 if (object == TARGET_OBJECT_MEMORY
1901 && ops->deprecated_xfer_memory != NULL)
1902 /* If available, fall back to the target's
1903 "deprecated_xfer_memory" method. */
1904 {
1905 int xfered = -1;
1906
1907 errno = 0;
1908 if (writebuf != NULL)
1909 {
1910 void *buffer = xmalloc (len);
1911 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1912
1913 memcpy (buffer, writebuf, len);
1914 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1915 1/*write*/, NULL, ops);
1916 do_cleanups (cleanup);
1917 }
1918 if (readbuf != NULL)
1919 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1920 0/*read*/, NULL, ops);
1921 if (xfered > 0)
1922 {
1923 *xfered_len = (ULONGEST) xfered;
1924 return TARGET_XFER_E_IO;
1925 }
1926 else if (xfered == 0 && errno == 0)
1927 /* "deprecated_xfer_memory" uses 0, cross checked against
1928 ERRNO as one indication of an error. */
1929 return TARGET_XFER_EOF;
1930 else
1931 return TARGET_XFER_E_IO;
1932 }
1933 else
1934 {
1935 gdb_assert (ops->beneath != NULL);
1936 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1937 readbuf, writebuf, offset, len,
1938 xfered_len);
1939 }
1940 }
1941
1942 /* Target vector read/write partial wrapper functions. */
1943
1944 static enum target_xfer_status
1945 target_read_partial (struct target_ops *ops,
1946 enum target_object object,
1947 const char *annex, gdb_byte *buf,
1948 ULONGEST offset, ULONGEST len,
1949 ULONGEST *xfered_len)
1950 {
1951 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1952 xfered_len);
1953 }
1954
1955 static enum target_xfer_status
1956 target_write_partial (struct target_ops *ops,
1957 enum target_object object,
1958 const char *annex, const gdb_byte *buf,
1959 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1960 {
1961 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1962 xfered_len);
1963 }
1964
1965 /* Wrappers to perform the full transfer. */
1966
1967 /* For docs on target_read see target.h. */
1968
1969 LONGEST
1970 target_read (struct target_ops *ops,
1971 enum target_object object,
1972 const char *annex, gdb_byte *buf,
1973 ULONGEST offset, LONGEST len)
1974 {
1975 LONGEST xfered = 0;
1976
1977 while (xfered < len)
1978 {
1979 ULONGEST xfered_len;
1980 enum target_xfer_status status;
1981
1982 status = target_read_partial (ops, object, annex,
1983 (gdb_byte *) buf + xfered,
1984 offset + xfered, len - xfered,
1985 &xfered_len);
1986
1987 /* Call an observer, notifying them of the xfer progress? */
1988 if (status == TARGET_XFER_EOF)
1989 return xfered;
1990 else if (status == TARGET_XFER_OK)
1991 {
1992 xfered += xfered_len;
1993 QUIT;
1994 }
1995 else
1996 return -1;
1997
1998 }
1999 return len;
2000 }
2001
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   which seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  FORWARD records which end
     was readable, and thus from which end we bisect.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the already-read end.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only
	     tries to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF ownership
	 transfers into the result vector.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just the
	 readable tail into a right-sized block and free BUF.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2136
2137 void
2138 free_memory_read_result_vector (void *x)
2139 {
2140 VEC(memory_read_result_s) *v = x;
2141 memory_read_result_s *current;
2142 int ix;
2143
2144 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2145 {
2146 xfree (current->data);
2147 }
2148 VEC_free (memory_read_result_s, v);
2149 }
2150
/* Read LEN bytes starting at OFFSET, collecting every readable
   subrange into a vector of memory_read_result_s records (each owns
   its data buffer; see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	rlen = len - xfered;
      else
	/* NOTE(review): RLEN is computed from OFFSET, not
	   OFFSET + XFERED; this looks correct only while the read has
	   not yet advanced past the first region -- verify.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  BUFFER is discarded;
		 read_whatever_is_readable allocates its own.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; record the bytes actually read.
		 BUFFER ownership transfers to the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2209
2210
2211 /* An alternative to target_write with progress callbacks. */
2212
2213 LONGEST
2214 target_write_with_progress (struct target_ops *ops,
2215 enum target_object object,
2216 const char *annex, const gdb_byte *buf,
2217 ULONGEST offset, LONGEST len,
2218 void (*progress) (ULONGEST, void *), void *baton)
2219 {
2220 LONGEST xfered = 0;
2221
2222 /* Give the progress callback a chance to set up. */
2223 if (progress)
2224 (*progress) (0, baton);
2225
2226 while (xfered < len)
2227 {
2228 ULONGEST xfered_len;
2229 enum target_xfer_status status;
2230
2231 status = target_write_partial (ops, object, annex,
2232 (gdb_byte *) buf + xfered,
2233 offset + xfered, len - xfered,
2234 &xfered_len);
2235
2236 if (status == TARGET_XFER_EOF)
2237 return xfered;
2238 if (TARGET_XFER_STATUS_ERROR_P (status))
2239 return -1;
2240
2241 gdb_assert (status == TARGET_XFER_OK);
2242 if (progress)
2243 (*progress) (xfered_len, baton);
2244
2245 xfered += xfered_len;
2246 QUIT;
2247 }
2248 return len;
2249 }
2250
2251 /* For docs on target_write see target.h. */
2252
2253 LONGEST
2254 target_write (struct target_ops *ops,
2255 enum target_object object,
2256 const char *annex, const gdb_byte *buf,
2257 ULONGEST offset, LONGEST len)
2258 {
2259 return target_write_with_progress (ops, object, annex, buf, offset, len,
2260 NULL, NULL);
2261 }
2262
2263 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2264 the size of the transferred data. PADDING additional bytes are
2265 available in *BUF_P. This is a helper function for
2266 target_read_alloc; see the declaration of that function for more
2267 information. */
2268
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes are held back at the end of the buffer so the
	 caller can append to the data (e.g. a NUL terminator).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Note that when nothing was read,
	     *BUF_P is left unset; callers must check the return value
	     before using it.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2325
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration of this
   function in "target.h" for more information about the return value.  */
2329
2330 LONGEST
2331 target_read_alloc (struct target_ops *ops, enum target_object object,
2332 const char *annex, gdb_byte **buf_p)
2333 {
2334 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2335 }
2336
2337 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2338 returned as a string, allocated using xmalloc. If an error occurs
2339 or the transfer is unsupported, NULL is returned. Empty objects
2340 are returned as allocated but empty strings. A warning is issued
2341 if the result contains any embedded NUL bytes. */
2342
2343 char *
2344 target_read_stralloc (struct target_ops *ops, enum target_object object,
2345 const char *annex)
2346 {
2347 gdb_byte *buffer;
2348 char *bufstr;
2349 LONGEST i, transferred;
2350
2351 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2352 bufstr = (char *) buffer;
2353
2354 if (transferred < 0)
2355 return NULL;
2356
2357 if (transferred == 0)
2358 return xstrdup ("");
2359
2360 bufstr[transferred] = 0;
2361
2362 /* Check for embedded NUL bytes; but allow trailing NULs. */
2363 for (i = strlen (bufstr); i < transferred; i++)
2364 if (bufstr[i] != 0)
2365 {
2366 warning (_("target object %d, annex %s, "
2367 "contained unexpected null characters"),
2368 (int) object, annex ? annex : "(none)");
2369 break;
2370 }
2371
2372 return bufstr;
2373 }
2374
2375 /* Memory transfer methods. */
2376
2377 void
2378 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2379 LONGEST len)
2380 {
2381 /* This method is used to read from an alternate, non-current
2382 target. This read must bypass the overlay support (as symbols
2383 don't match this target), and GDB's internal cache (wrong cache
2384 for this target). */
2385 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2386 != len)
2387 memory_error (TARGET_XFER_E_IO, addr);
2388 }
2389
2390 ULONGEST
2391 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2392 int len, enum bfd_endian byte_order)
2393 {
2394 gdb_byte buf[sizeof (ULONGEST)];
2395
2396 gdb_assert (len <= sizeof (buf));
2397 get_target_memory (ops, addr, buf, len);
2398 return extract_unsigned_integer (buf, len, byte_order);
2399 }
2400
2401 /* See target.h. */
2402
2403 int
2404 target_insert_breakpoint (struct gdbarch *gdbarch,
2405 struct bp_target_info *bp_tgt)
2406 {
2407 if (!may_insert_breakpoints)
2408 {
2409 warning (_("May not insert breakpoints"));
2410 return 1;
2411 }
2412
2413 return current_target.to_insert_breakpoint (&current_target,
2414 gdbarch, bp_tgt);
2415 }
2416
2417 /* See target.h. */
2418
2419 int
2420 target_remove_breakpoint (struct gdbarch *gdbarch,
2421 struct bp_target_info *bp_tgt)
2422 {
2423 /* This is kind of a weird case to handle, but the permission might
2424 have been changed after breakpoints were inserted - in which case
2425 we should just take the user literally and assume that any
2426 breakpoints should be left in place. */
2427 if (!may_insert_breakpoints)
2428 {
2429 warning (_("May not remove breakpoints"));
2430 return 1;
2431 }
2432
2433 return current_target.to_remove_breakpoint (&current_target,
2434 gdbarch, bp_tgt);
2435 }
2436
2437 static void
2438 target_info (char *args, int from_tty)
2439 {
2440 struct target_ops *t;
2441 int has_all_mem = 0;
2442
2443 if (symfile_objfile != NULL)
2444 printf_unfiltered (_("Symbols from \"%s\".\n"),
2445 objfile_name (symfile_objfile));
2446
2447 for (t = target_stack; t != NULL; t = t->beneath)
2448 {
2449 if (!(*t->to_has_memory) (t))
2450 continue;
2451
2452 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2453 continue;
2454 if (has_all_mem)
2455 printf_unfiltered (_("\tWhile running this, "
2456 "GDB does not access memory from...\n"));
2457 printf_unfiltered ("%s:\n", t->to_longname);
2458 (t->to_files_info) (t);
2459 has_all_mem = (*t->to_has_all_memory) (t);
2460 }
2461 }
2462
2463 /* This function is called before any new inferior is created, e.g.
2464 by running a program, attaching, or connecting to a target.
2465 It cleans up any state from previous invocations which might
2466 change between runs. This is a subset of what target_preopen
2467 resets (things which might change between targets). */
2468
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case, keep the solib list,
     memory regions, and target description across the switch.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Force re-probing of agent capabilities for the new inferior.  */
  agent_capability_invalidate ();
}
2503
2504 /* Callback for iterate_over_inferiors. Gets rid of the given
2505 inferior. */
2506
2507 static int
2508 dispose_inferior (struct inferior *inf, void *args)
2509 {
2510 struct thread_info *thread;
2511
2512 thread = any_thread_of_process (inf->pid);
2513 if (thread)
2514 {
2515 switch_to_thread (thread->ptid);
2516
2517 /* Core inferiors actually should be detached, not killed. */
2518 if (target_has_execution)
2519 target_kill ();
2520 else
2521 target_detach (NULL, 0);
2522 }
2523
2524 return 0;
2525 }
2526
2527 /* This is to be called by the open routine before it does
2528 anything. */
2529
2530 void
2531 target_preopen (int from_tty)
2532 {
2533 dont_repeat ();
2534
2535 if (have_inferiors ())
2536 {
2537 if (!from_tty
2538 || !have_live_inferiors ()
2539 || query (_("A program is being debugged already. Kill it? ")))
2540 iterate_over_inferiors (dispose_inferior, NULL);
2541 else
2542 error (_("Program not killed."));
2543 }
2544
2545 /* Calling target_kill may remove the target from the stack. But if
2546 it doesn't (which seems like a win for UDI), remove it now. */
2547 /* Leave the exec target, though. The user may be switching from a
2548 live process to a core of the same program. */
2549 pop_all_targets_above (file_stratum);
2550
2551 target_pre_inferior (from_tty);
2552 }
2553
2554 /* Detach a target after doing deferred register stores. */
2555
2556 void
2557 target_detach (const char *args, int from_tty)
2558 {
2559 struct target_ops* t;
2560
2561 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2562 /* Don't remove global breakpoints here. They're removed on
2563 disconnection from the target. */
2564 ;
2565 else
2566 /* If we're in breakpoints-always-inserted mode, have to remove
2567 them before detaching. */
2568 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2569
2570 prepare_for_detach ();
2571
2572 current_target.to_detach (&current_target, args, from_tty);
2573 if (targetdebug)
2574 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2575 args, from_tty);
2576 }
2577
2578 void
2579 target_disconnect (char *args, int from_tty)
2580 {
2581 struct target_ops *t;
2582
2583 /* If we're in breakpoints-always-inserted mode or if breakpoints
2584 are global across processes, we have to remove them before
2585 disconnecting. */
2586 remove_breakpoints ();
2587
2588 for (t = current_target.beneath; t != NULL; t = t->beneath)
2589 if (t->to_disconnect != NULL)
2590 {
2591 if (targetdebug)
2592 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2593 args, from_tty);
2594 t->to_disconnect (t, args, from_tty);
2595 return;
2596 }
2597
2598 tcomplain ();
2599 }
2600
2601 ptid_t
2602 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2603 {
2604 struct target_ops *t;
2605 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2606 status, options);
2607
2608 if (targetdebug)
2609 {
2610 char *status_string;
2611 char *options_string;
2612
2613 status_string = target_waitstatus_to_string (status);
2614 options_string = target_options_to_string (options);
2615 fprintf_unfiltered (gdb_stdlog,
2616 "target_wait (%d, status, options={%s})"
2617 " = %d, %s\n",
2618 ptid_get_pid (ptid), options_string,
2619 ptid_get_pid (retval), status_string);
2620 xfree (status_string);
2621 xfree (options_string);
2622 }
2623
2624 return retval;
2625 }
2626
2627 char *
2628 target_pid_to_str (ptid_t ptid)
2629 {
2630 struct target_ops *t;
2631
2632 for (t = current_target.beneath; t != NULL; t = t->beneath)
2633 {
2634 if (t->to_pid_to_str != NULL)
2635 return (*t->to_pid_to_str) (t, ptid);
2636 }
2637
2638 return normal_pid_to_str (ptid);
2639 }
2640
2641 char *
2642 target_thread_name (struct thread_info *info)
2643 {
2644 return current_target.to_thread_name (&current_target, info);
2645 }
2646
2647 void
2648 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2649 {
2650 struct target_ops *t;
2651
2652 target_dcache_invalidate ();
2653
2654 current_target.to_resume (&current_target, ptid, step, signal);
2655 if (targetdebug)
2656 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2657 ptid_get_pid (ptid),
2658 step ? "step" : "continue",
2659 gdb_signal_to_name (signal));
2660
2661 registers_changed_ptid (ptid);
2662 set_executing (ptid, 1);
2663 set_running (ptid, 1);
2664 clear_inline_frame_state (ptid);
2665 }
2666
2667 void
2668 target_pass_signals (int numsigs, unsigned char *pass_signals)
2669 {
2670 struct target_ops *t;
2671
2672 for (t = current_target.beneath; t != NULL; t = t->beneath)
2673 {
2674 if (t->to_pass_signals != NULL)
2675 {
2676 if (targetdebug)
2677 {
2678 int i;
2679
2680 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2681 numsigs);
2682
2683 for (i = 0; i < numsigs; i++)
2684 if (pass_signals[i])
2685 fprintf_unfiltered (gdb_stdlog, " %s",
2686 gdb_signal_to_name (i));
2687
2688 fprintf_unfiltered (gdb_stdlog, " })\n");
2689 }
2690
2691 (*t->to_pass_signals) (t, numsigs, pass_signals);
2692 return;
2693 }
2694 }
2695 }
2696
2697 void
2698 target_program_signals (int numsigs, unsigned char *program_signals)
2699 {
2700 struct target_ops *t;
2701
2702 for (t = current_target.beneath; t != NULL; t = t->beneath)
2703 {
2704 if (t->to_program_signals != NULL)
2705 {
2706 if (targetdebug)
2707 {
2708 int i;
2709
2710 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2711 numsigs);
2712
2713 for (i = 0; i < numsigs; i++)
2714 if (program_signals[i])
2715 fprintf_unfiltered (gdb_stdlog, " %s",
2716 gdb_signal_to_name (i));
2717
2718 fprintf_unfiltered (gdb_stdlog, " })\n");
2719 }
2720
2721 (*t->to_program_signals) (t, numsigs, program_signals);
2722 return;
2723 }
2724 }
2725 }
2726
2727 /* Look through the list of possible targets for a target that can
2728 follow forks. */
2729
2730 int
2731 target_follow_fork (int follow_child, int detach_fork)
2732 {
2733 struct target_ops *t;
2734
2735 for (t = current_target.beneath; t != NULL; t = t->beneath)
2736 {
2737 if (t->to_follow_fork != NULL)
2738 {
2739 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2740
2741 if (targetdebug)
2742 fprintf_unfiltered (gdb_stdlog,
2743 "target_follow_fork (%d, %d) = %d\n",
2744 follow_child, detach_fork, retval);
2745 return retval;
2746 }
2747 }
2748
2749 /* Some target returned a fork event, but did not know how to follow it. */
2750 internal_error (__FILE__, __LINE__,
2751 _("could not find a target to follow fork"));
2752 }
2753
2754 void
2755 target_mourn_inferior (void)
2756 {
2757 struct target_ops *t;
2758
2759 for (t = current_target.beneath; t != NULL; t = t->beneath)
2760 {
2761 if (t->to_mourn_inferior != NULL)
2762 {
2763 t->to_mourn_inferior (t);
2764 if (targetdebug)
2765 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2766
2767 /* We no longer need to keep handles on any of the object files.
2768 Make sure to release them to avoid unnecessarily locking any
2769 of them while we're not actually debugging. */
2770 bfd_cache_close_all ();
2771
2772 return;
2773 }
2774 }
2775
2776 internal_error (__FILE__, __LINE__,
2777 _("could not find a target to follow mourn inferior"));
2778 }
2779
2780 /* Look for a target which can describe architectural features, starting
2781 from TARGET. If we find one, return its description. */
2782
2783 const struct target_desc *
2784 target_read_description (struct target_ops *target)
2785 {
2786 struct target_ops *t;
2787
2788 for (t = target; t != NULL; t = t->beneath)
2789 if (t->to_read_description != NULL)
2790 {
2791 const struct target_desc *tdesc;
2792
2793 tdesc = t->to_read_description (t);
2794 if (tdesc)
2795 return tdesc;
2796 }
2797
2798 return NULL;
2799 }
2800
2801 /* The default implementation of to_search_memory.
2802 This implements a basic search of memory, reading target memory and
2803 performing the search here (as opposed to performing the search in on the
2804 target side with, for example, gdbserver). */
2805
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  Sized one
     pattern-length minus one beyond the chunk so a match straddling a
     chunk boundary is still found.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc rather than xmalloc, with an explicit
     error on failure -- presumably deliberate, to report OOM as a
     user-level error instead of aborting; verify.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back into a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      /* NOTE(review): this warning formats with plongest while
		 the earlier one uses pulongest -- likely a harmless
		 inconsistency; verify.  */
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2908
2909 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2910 sequence of bytes in PATTERN with length PATTERN_LEN.
2911
2912 The result is 1 if found, 0 if not found, and -1 if there was an error
2913 requiring halting of the search (e.g. memory read error).
2914 If the pattern is found the address is recorded in FOUND_ADDRP. */
2915
2916 int
2917 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2918 const gdb_byte *pattern, ULONGEST pattern_len,
2919 CORE_ADDR *found_addrp)
2920 {
2921 struct target_ops *t;
2922 int found;
2923
2924 /* We don't use INHERIT to set current_target.to_search_memory,
2925 so we have to scan the target stack and handle targetdebug
2926 ourselves. */
2927
2928 if (targetdebug)
2929 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2930 hex_string (start_addr));
2931
2932 for (t = current_target.beneath; t != NULL; t = t->beneath)
2933 if (t->to_search_memory != NULL)
2934 break;
2935
2936 if (t != NULL)
2937 {
2938 found = t->to_search_memory (t, start_addr, search_space_len,
2939 pattern, pattern_len, found_addrp);
2940 }
2941 else
2942 {
2943 /* If a special version of to_search_memory isn't available, use the
2944 simple version. */
2945 found = simple_search_memory (current_target.beneath,
2946 start_addr, search_space_len,
2947 pattern, pattern_len, found_addrp);
2948 }
2949
2950 if (targetdebug)
2951 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2952
2953 return found;
2954 }
2955
2956 /* Look through the currently pushed targets. If none of them will
2957 be able to restart the currently running process, issue an error
2958 message. */
2959
2960 void
2961 target_require_runnable (void)
2962 {
2963 struct target_ops *t;
2964
2965 for (t = target_stack; t != NULL; t = t->beneath)
2966 {
2967 /* If this target knows how to create a new program, then
2968 assume we will still be able to after killing the current
2969 one. Either killing and mourning will not pop T, or else
2970 find_default_run_target will find it again. */
2971 if (t->to_create_inferior != NULL)
2972 return;
2973
2974 /* Do not worry about thread_stratum targets that can not
2975 create inferiors. Assume they will be pushed again if
2976 necessary, and continue to the process_stratum. */
2977 if (t->to_stratum == thread_stratum
2978 || t->to_stratum == arch_stratum)
2979 continue;
2980
2981 error (_("The \"%s\" target does not support \"run\". "
2982 "Try \"help target\" or \"continue\"."),
2983 t->to_shortname);
2984 }
2985
2986 /* This function is only called if the target is running. In that
2987 case there should have been a process_stratum target and it
2988 should either know how to create inferiors, or not... */
2989 internal_error (__FILE__, __LINE__, _("No targets found"));
2990 }
2991
2992 /* Look through the list of possible targets for a target that can
2993 execute a run or attach command without any other data. This is
2994 used to locate the default process stratum.
2995
2996 If DO_MESG is not NULL, the result is always valid (error() is
2997 called for errors); else, return NULL on error. */
2998
2999 static struct target_ops *
3000 find_default_run_target (char *do_mesg)
3001 {
3002 struct target_ops **t;
3003 struct target_ops *runable = NULL;
3004 int count;
3005
3006 count = 0;
3007
3008 for (t = target_structs; t < target_structs + target_struct_size;
3009 ++t)
3010 {
3011 if ((*t)->to_can_run && target_can_run (*t))
3012 {
3013 runable = *t;
3014 ++count;
3015 }
3016 }
3017
3018 if (count != 1)
3019 {
3020 if (do_mesg)
3021 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3022 else
3023 return NULL;
3024 }
3025
3026 return runable;
3027 }
3028
3029 void
3030 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3031 {
3032 struct target_ops *t;
3033
3034 t = find_default_run_target ("attach");
3035 (t->to_attach) (t, args, from_tty);
3036 return;
3037 }
3038
3039 void
3040 find_default_create_inferior (struct target_ops *ops,
3041 char *exec_file, char *allargs, char **env,
3042 int from_tty)
3043 {
3044 struct target_ops *t;
3045
3046 t = find_default_run_target ("run");
3047 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3048 return;
3049 }
3050
3051 static int
3052 find_default_can_async_p (struct target_ops *ignore)
3053 {
3054 struct target_ops *t;
3055
3056 /* This may be called before the target is pushed on the stack;
3057 look for the default process stratum. If there's none, gdb isn't
3058 configured with a native debugger, and target remote isn't
3059 connected yet. */
3060 t = find_default_run_target (NULL);
3061 if (t && t->to_can_async_p != delegate_can_async_p)
3062 return (t->to_can_async_p) (t);
3063 return 0;
3064 }
3065
3066 static int
3067 find_default_is_async_p (struct target_ops *ignore)
3068 {
3069 struct target_ops *t;
3070
3071 /* This may be called before the target is pushed on the stack;
3072 look for the default process stratum. If there's none, gdb isn't
3073 configured with a native debugger, and target remote isn't
3074 connected yet. */
3075 t = find_default_run_target (NULL);
3076 if (t && t->to_is_async_p != delegate_is_async_p)
3077 return (t->to_is_async_p) (t);
3078 return 0;
3079 }
3080
3081 static int
3082 find_default_supports_non_stop (struct target_ops *self)
3083 {
3084 struct target_ops *t;
3085
3086 t = find_default_run_target (NULL);
3087 if (t && t->to_supports_non_stop)
3088 return (t->to_supports_non_stop) (t);
3089 return 0;
3090 }
3091
3092 int
3093 target_supports_non_stop (void)
3094 {
3095 struct target_ops *t;
3096
3097 for (t = &current_target; t != NULL; t = t->beneath)
3098 if (t->to_supports_non_stop)
3099 return t->to_supports_non_stop (t);
3100
3101 return 0;
3102 }
3103
3104 /* Implement the "info proc" command. */
3105
3106 int
3107 target_info_proc (char *args, enum info_proc_what what)
3108 {
3109 struct target_ops *t;
3110
3111 /* If we're already connected to something that can get us OS
3112 related data, use it. Otherwise, try using the native
3113 target. */
3114 if (current_target.to_stratum >= process_stratum)
3115 t = current_target.beneath;
3116 else
3117 t = find_default_run_target (NULL);
3118
3119 for (; t != NULL; t = t->beneath)
3120 {
3121 if (t->to_info_proc != NULL)
3122 {
3123 t->to_info_proc (t, args, what);
3124
3125 if (targetdebug)
3126 fprintf_unfiltered (gdb_stdlog,
3127 "target_info_proc (\"%s\", %d)\n", args, what);
3128
3129 return 1;
3130 }
3131 }
3132
3133 return 0;
3134 }
3135
3136 static int
3137 find_default_supports_disable_randomization (struct target_ops *self)
3138 {
3139 struct target_ops *t;
3140
3141 t = find_default_run_target (NULL);
3142 if (t && t->to_supports_disable_randomization)
3143 return (t->to_supports_disable_randomization) (t);
3144 return 0;
3145 }
3146
3147 int
3148 target_supports_disable_randomization (void)
3149 {
3150 struct target_ops *t;
3151
3152 for (t = &current_target; t != NULL; t = t->beneath)
3153 if (t->to_supports_disable_randomization)
3154 return t->to_supports_disable_randomization (t);
3155
3156 return 0;
3157 }
3158
3159 char *
3160 target_get_osdata (const char *type)
3161 {
3162 struct target_ops *t;
3163
3164 /* If we're already connected to something that can get us OS
3165 related data, use it. Otherwise, try using the native
3166 target. */
3167 if (current_target.to_stratum >= process_stratum)
3168 t = current_target.beneath;
3169 else
3170 t = find_default_run_target ("get OS data");
3171
3172 if (!t)
3173 return NULL;
3174
3175 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3176 }
3177
3178 /* Determine the current address space of thread PTID. */
3179
struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target beneath that implements the hook.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  /* NOTE(review): this internal_error message ends with "\n", which
     looks unusual for GDB error strings -- verify.  */
  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3214
3215
3216 /* Target file operations. */
3217
3218 static struct target_ops *
3219 default_fileio_target (void)
3220 {
3221 /* If we're already connected to something that can perform
3222 file I/O, use it. Otherwise, try using the native target. */
3223 if (current_target.to_stratum >= process_stratum)
3224 return current_target.beneath;
3225 else
3226 return find_default_run_target ("file I/O");
3227 }
3228
3229 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3230 target file descriptor, or -1 if an error occurs (and set
3231 *TARGET_ERRNO). */
3232 int
3233 target_fileio_open (const char *filename, int flags, int mode,
3234 int *target_errno)
3235 {
3236 struct target_ops *t;
3237
3238 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3239 {
3240 if (t->to_fileio_open != NULL)
3241 {
3242 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3243
3244 if (targetdebug)
3245 fprintf_unfiltered (gdb_stdlog,
3246 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3247 filename, flags, mode,
3248 fd, fd != -1 ? 0 : *target_errno);
3249 return fd;
3250 }
3251 }
3252
3253 *target_errno = FILEIO_ENOSYS;
3254 return -1;
3255 }
3256
3257 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3258 Return the number of bytes written, or -1 if an error occurs
3259 (and set *TARGET_ERRNO). */
3260 int
3261 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3262 ULONGEST offset, int *target_errno)
3263 {
3264 struct target_ops *t;
3265
3266 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3267 {
3268 if (t->to_fileio_pwrite != NULL)
3269 {
3270 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3271 target_errno);
3272
3273 if (targetdebug)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "target_fileio_pwrite (%d,...,%d,%s) "
3276 "= %d (%d)\n",
3277 fd, len, pulongest (offset),
3278 ret, ret != -1 ? 0 : *target_errno);
3279 return ret;
3280 }
3281 }
3282
3283 *target_errno = FILEIO_ENOSYS;
3284 return -1;
3285 }
3286
3287 /* Read up to LEN bytes FD on the target into READ_BUF.
3288 Return the number of bytes read, or -1 if an error occurs
3289 (and set *TARGET_ERRNO). */
3290 int
3291 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3292 ULONGEST offset, int *target_errno)
3293 {
3294 struct target_ops *t;
3295
3296 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3297 {
3298 if (t->to_fileio_pread != NULL)
3299 {
3300 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3301 target_errno);
3302
3303 if (targetdebug)
3304 fprintf_unfiltered (gdb_stdlog,
3305 "target_fileio_pread (%d,...,%d,%s) "
3306 "= %d (%d)\n",
3307 fd, len, pulongest (offset),
3308 ret, ret != -1 ? 0 : *target_errno);
3309 return ret;
3310 }
3311 }
3312
3313 *target_errno = FILEIO_ENOSYS;
3314 return -1;
3315 }
3316
3317 /* Close FD on the target. Return 0, or -1 if an error occurs
3318 (and set *TARGET_ERRNO). */
3319 int
3320 target_fileio_close (int fd, int *target_errno)
3321 {
3322 struct target_ops *t;
3323
3324 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3325 {
3326 if (t->to_fileio_close != NULL)
3327 {
3328 int ret = t->to_fileio_close (t, fd, target_errno);
3329
3330 if (targetdebug)
3331 fprintf_unfiltered (gdb_stdlog,
3332 "target_fileio_close (%d) = %d (%d)\n",
3333 fd, ret, ret != -1 ? 0 : *target_errno);
3334 return ret;
3335 }
3336 }
3337
3338 *target_errno = FILEIO_ENOSYS;
3339 return -1;
3340 }
3341
3342 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3343 occurs (and set *TARGET_ERRNO). */
3344 int
3345 target_fileio_unlink (const char *filename, int *target_errno)
3346 {
3347 struct target_ops *t;
3348
3349 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3350 {
3351 if (t->to_fileio_unlink != NULL)
3352 {
3353 int ret = t->to_fileio_unlink (t, filename, target_errno);
3354
3355 if (targetdebug)
3356 fprintf_unfiltered (gdb_stdlog,
3357 "target_fileio_unlink (%s) = %d (%d)\n",
3358 filename, ret, ret != -1 ? 0 : *target_errno);
3359 return ret;
3360 }
3361 }
3362
3363 *target_errno = FILEIO_ENOSYS;
3364 return -1;
3365 }
3366
3367 /* Read value of symbolic link FILENAME on the target. Return a
3368 null-terminated string allocated via xmalloc, or NULL if an error
3369 occurs (and set *TARGET_ERRNO). */
3370 char *
3371 target_fileio_readlink (const char *filename, int *target_errno)
3372 {
3373 struct target_ops *t;
3374
3375 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3376 {
3377 if (t->to_fileio_readlink != NULL)
3378 {
3379 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3380
3381 if (targetdebug)
3382 fprintf_unfiltered (gdb_stdlog,
3383 "target_fileio_readlink (%s) = %s (%d)\n",
3384 filename, ret? ret : "(nil)",
3385 ret? 0 : *target_errno);
3386 return ret;
3387 }
3388 }
3389
3390 *target_errno = FILEIO_ENOSYS;
3391 return NULL;
3392 }
3393
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error from the close itself.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;
  int fd = *(int *) opaque;

  target_fileio_close (fd, &target_errno);
}
3402
3403 /* Read target file FILENAME. Store the result in *BUF_P and
3404 return the size of the transferred data. PADDING additional bytes are
3405 available in *BUF_P. This is a helper function for
3406 target_fileio_read_alloc; see the declaration of that function for more
3407 information. */
3408
static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Make sure the descriptor is closed on every exit path.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING spare bytes at the end so the caller can
	 append e.g. a NUL terminator (see
	 target_fileio_read_stralloc).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  NOTE: on an empty file the buffer is
	     freed and *BUF_P is left unset; callers must test for a
	     zero return before touching *BUF_P.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3466
3467 /* Read target file FILENAME. Store the result in *BUF_P and return
3468 the size of the transferred data. See the declaration in "target.h"
3469 function for more information about the return value. */
3470
LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* A raw read needs no spare padding bytes at the end.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3476
3477 /* Read target file FILENAME. The result is NUL-terminated and
3478 returned as a string, allocated using xmalloc. If an error occurs
3479 or the transfer is unsupported, NULL is returned. Empty objects
3480 are returned as allocated but empty strings. A warning is issued
3481 if the result contains any embedded NUL bytes. */
3482
char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding for the NUL terminator below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* Empty file: the helper already freed its buffer.  */
  if (transferred == 0)
    return xstrdup ("");

  /* Safe thanks to the padding byte requested above.  */
  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  /* Ownership of the xmalloc'd buffer passes to the caller.  */
  return bufstr;
}
3513
3514
3515 static int
3516 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3517 CORE_ADDR addr, int len)
3518 {
3519 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3520 }
3521
3522 static int
3523 default_watchpoint_addr_within_range (struct target_ops *target,
3524 CORE_ADDR addr,
3525 CORE_ADDR start, int length)
3526 {
3527 return addr >= start && addr < start + length;
3528 }
3529
/* Default implementation of to_thread_architecture: every thread
   uses the main target architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3535
/* Stub returning 0; cast into various dummy target-vector slots (see
   init_dummy_target).  */
static int
return_zero (void)
{
  return 0;
}
3541
/* Stub returning -1, for target-vector slots whose "not supported"
   value is -1.  */
static int
return_minus_one (void)
{
  return -1;
}
3547
/* Stub returning a null pointer, for target-vector slots that yield
   pointers.  */

static void *
return_null (void)
{
  return NULL;
}
3553
3554 /*
3555 * Find the next target down the stack from the specified target.
3556 */
3557
/* Return the target directly beneath T on the target stack, or NULL
   if T is bottom-most.  */
struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3563
3564 /* See target.h. */
3565
3566 struct target_ops *
3567 find_target_at (enum strata stratum)
3568 {
3569 struct target_ops *t;
3570
3571 for (t = current_target.beneath; t != NULL; t = t->beneath)
3572 if (t->to_stratum == stratum)
3573 return t;
3574
3575 return NULL;
3576 }
3577
3578 \f
3579 /* The inferior process has died. Long live the inferior! */
3580
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Capture and clear the current inferior ptid before tearing
     anything down.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Cached register contents are no longer valid.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3613 \f
3614 /* Convert a normal process ID to a string. Returns the string in a
3615 static buffer. */
3616
/* NOTE: the result lives in a static buffer: it is overwritten by
   the next call and must not be freed by the caller.  */
char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3625
/* to_pid_to_str implementation for the dummy target: just the plain
   "process N" form.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3631
3632 /* Error-catcher for target_find_memory_regions. */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached -- error () does not return; the return value only
     pacifies the compiler.  */
  return 0;
}
3640
3641 /* Error-catcher for target_make_corefile_notes. */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached -- error () does not return.  */
  return NULL;
}
3649
3650 /* Set up the handful of non-empty slots needed by the dummy target
3651 vector. */
3652
static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  /* "run"/"attach" on the dummy target search for a default run
     target to push.  */
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* The dummy target has nothing; adapt the zero-argument
     return_zero stub to each slot's signature by casting.
     NOTE(review): calling through these mismatched function-pointer
     casts is formally undefined behavior in ISO C, though it works
     on the ABIs GDB supports -- worth replacing with per-signature
     stubs.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3675 \f
/* Debug-logging wrapper for the "open" target method: forward to the
   real target, then log the call.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3683
void
target_close (struct target_ops *targ)
{
  /* A target must be unpushed before it can be closed.  */
  gdb_assert (!target_is_pushed (targ));

  /* When both close methods exist, to_xclose takes precedence.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3697
/* Attach to a running process as described by ARGS, delegating to
   the current target's to_attach method.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3706
3707 int
3708 target_thread_alive (ptid_t ptid)
3709 {
3710 struct target_ops *t;
3711
3712 for (t = current_target.beneath; t != NULL; t = t->beneath)
3713 {
3714 if (t->to_thread_alive != NULL)
3715 {
3716 int retval;
3717
3718 retval = t->to_thread_alive (t, ptid);
3719 if (targetdebug)
3720 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3721 ptid_get_pid (ptid), retval);
3722
3723 return retval;
3724 }
3725 }
3726
3727 return 0;
3728 }
3729
3730 void
3731 target_find_new_threads (void)
3732 {
3733 struct target_ops *t;
3734
3735 for (t = current_target.beneath; t != NULL; t = t->beneath)
3736 {
3737 if (t->to_find_new_threads != NULL)
3738 {
3739 t->to_find_new_threads (t);
3740 if (targetdebug)
3741 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3742
3743 return;
3744 }
3745 }
3746 }
3747
/* Ask the current target to stop the thread(s) denoted by PTID,
   unless the user's "may-stop" setting forbids it.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3759
/* Debug-logging wrapper for the "post_attach" target method.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3767
3768 /* Concatenate ELEM to LIST, a comma separate list, and return the
3769 result. The LIST incoming argument is released. */
3770
static char *
str_comma_list_concat_elem (char *list, const char *elem)
{
  if (list == NULL)
    return xstrdup (elem);
  else
    /* reconcat builds the new string and releases LIST (passed as
       both the "old" pointer and the first component).  */
    return reconcat (list, list, ", ", elem, (char *) NULL);
}
3779
3780 /* Helper for target_options_to_string. If OPT is present in
3781 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3782 Returns the new resulting string. OPT is removed from
3783 TARGET_OPTIONS. */
3784
static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      /* Clear the bit so the caller can detect leftover unknown
	 option bits afterwards.  */
      *target_options &= ~opt;
    }

  return ret;
}
3797
/* Render TARGET_OPTIONS (a mask of TARGET_* flags) as a
   comma-separated string allocated with xmalloc; never returns
   NULL.  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Bits not cleared by DO_TARG_OPTION above are unrecognized.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3815
/* Dump register REGNO of REGCACHE to the target debug log, prefixed
   with FUNC (the name of the calling function).  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register's name when REGNO is a named raw register;
     otherwise fall back to the number itself.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Raw bytes first, in target byte order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* When the value fits in a LONGEST, also print it numerically,
	 both as an address-style hex string and in decimal.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3852
3853 void
3854 target_fetch_registers (struct regcache *regcache, int regno)
3855 {
3856 struct target_ops *t;
3857
3858 for (t = current_target.beneath; t != NULL; t = t->beneath)
3859 {
3860 if (t->to_fetch_registers != NULL)
3861 {
3862 t->to_fetch_registers (t, regcache, regno);
3863 if (targetdebug)
3864 debug_print_register ("target_fetch_registers", regcache, regno);
3865 return;
3866 }
3867 }
3868 }
3869
3870 void
3871 target_store_registers (struct regcache *regcache, int regno)
3872 {
3873 struct target_ops *t;
3874
3875 if (!may_write_registers)
3876 error (_("Writing to registers is not allowed (regno %d)"), regno);
3877
3878 current_target.to_store_registers (&current_target, regcache, regno);
3879 if (targetdebug)
3880 {
3881 debug_print_register ("target_store_registers", regcache, regno);
3882 }
3883 }
3884
3885 int
3886 target_core_of_thread (ptid_t ptid)
3887 {
3888 struct target_ops *t;
3889
3890 for (t = current_target.beneath; t != NULL; t = t->beneath)
3891 {
3892 if (t->to_core_of_thread != NULL)
3893 {
3894 int retval = t->to_core_of_thread (t, ptid);
3895
3896 if (targetdebug)
3897 fprintf_unfiltered (gdb_stdlog,
3898 "target_core_of_thread (%d) = %d\n",
3899 ptid_get_pid (ptid), retval);
3900 return retval;
3901 }
3902 }
3903
3904 return -1;
3905 }
3906
/* Compare SIZE bytes of target memory at MEMADDR against DATA, using
   the first target on the stack that implements to_verify_memory.  */
int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_verify_memory != NULL)
	{
	  int retval = t->to_verify_memory (t, data, memaddr, size);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_verify_memory (%s, %s) = %d\n",
				paddress (target_gdbarch (), memaddr),
				pulongest (size),
				retval);
	  return retval;
	}
    }

  /* No implementation: tcomplain reports the error (presumably via a
     non-returning throw -- no value is produced on this path).  */
  tcomplain ();
}
3930
3931 /* The documentation for this function is in its prototype declaration in
3932 target.h. */
3933
3934 int
3935 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3936 {
3937 struct target_ops *t;
3938
3939 for (t = current_target.beneath; t != NULL; t = t->beneath)
3940 if (t->to_insert_mask_watchpoint != NULL)
3941 {
3942 int ret;
3943
3944 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3945
3946 if (targetdebug)
3947 fprintf_unfiltered (gdb_stdlog, "\
3948 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3949 core_addr_to_string (addr),
3950 core_addr_to_string (mask), rw, ret);
3951
3952 return ret;
3953 }
3954
3955 return 1;
3956 }
3957
3958 /* The documentation for this function is in its prototype declaration in
3959 target.h. */
3960
3961 int
3962 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3963 {
3964 struct target_ops *t;
3965
3966 for (t = current_target.beneath; t != NULL; t = t->beneath)
3967 if (t->to_remove_mask_watchpoint != NULL)
3968 {
3969 int ret;
3970
3971 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3972
3973 if (targetdebug)
3974 fprintf_unfiltered (gdb_stdlog, "\
3975 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3976 core_addr_to_string (addr),
3977 core_addr_to_string (mask), rw, ret);
3978
3979 return ret;
3980 }
3981
3982 return 1;
3983 }
3984
3985 /* The documentation for this function is in its prototype declaration
3986 in target.h. */
3987
3988 int
3989 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3990 {
3991 struct target_ops *t;
3992
3993 for (t = current_target.beneath; t != NULL; t = t->beneath)
3994 if (t->to_masked_watch_num_registers != NULL)
3995 return t->to_masked_watch_num_registers (t, addr, mask);
3996
3997 return -1;
3998 }
3999
4000 /* The documentation for this function is in its prototype declaration
4001 in target.h. */
4002
4003 int
4004 target_ranged_break_num_registers (void)
4005 {
4006 struct target_ops *t;
4007
4008 for (t = current_target.beneath; t != NULL; t = t->beneath)
4009 if (t->to_ranged_break_num_registers != NULL)
4010 return t->to_ranged_break_num_registers (t);
4011
4012 return -1;
4013 }
4014
4015 /* See target.h. */
4016
4017 struct btrace_target_info *
4018 target_enable_btrace (ptid_t ptid)
4019 {
4020 struct target_ops *t;
4021
4022 for (t = current_target.beneath; t != NULL; t = t->beneath)
4023 if (t->to_enable_btrace != NULL)
4024 return t->to_enable_btrace (t, ptid);
4025
4026 tcomplain ();
4027 return NULL;
4028 }
4029
4030 /* See target.h. */
4031
4032 void
4033 target_disable_btrace (struct btrace_target_info *btinfo)
4034 {
4035 struct target_ops *t;
4036
4037 for (t = current_target.beneath; t != NULL; t = t->beneath)
4038 if (t->to_disable_btrace != NULL)
4039 {
4040 t->to_disable_btrace (t, btinfo);
4041 return;
4042 }
4043
4044 tcomplain ();
4045 }
4046
4047 /* See target.h. */
4048
4049 void
4050 target_teardown_btrace (struct btrace_target_info *btinfo)
4051 {
4052 struct target_ops *t;
4053
4054 for (t = current_target.beneath; t != NULL; t = t->beneath)
4055 if (t->to_teardown_btrace != NULL)
4056 {
4057 t->to_teardown_btrace (t, btinfo);
4058 return;
4059 }
4060
4061 tcomplain ();
4062 }
4063
4064 /* See target.h. */
4065
4066 enum btrace_error
4067 target_read_btrace (VEC (btrace_block_s) **btrace,
4068 struct btrace_target_info *btinfo,
4069 enum btrace_read_type type)
4070 {
4071 struct target_ops *t;
4072
4073 for (t = current_target.beneath; t != NULL; t = t->beneath)
4074 if (t->to_read_btrace != NULL)
4075 return t->to_read_btrace (t, btrace, btinfo, type);
4076
4077 tcomplain ();
4078 return BTRACE_ERR_NOT_SUPPORTED;
4079 }
4080
4081 /* See target.h. */
4082
4083 void
4084 target_stop_recording (void)
4085 {
4086 struct target_ops *t;
4087
4088 for (t = current_target.beneath; t != NULL; t = t->beneath)
4089 if (t->to_stop_recording != NULL)
4090 {
4091 t->to_stop_recording (t);
4092 return;
4093 }
4094
4095 /* This is optional. */
4096 }
4097
4098 /* See target.h. */
4099
4100 void
4101 target_info_record (void)
4102 {
4103 struct target_ops *t;
4104
4105 for (t = current_target.beneath; t != NULL; t = t->beneath)
4106 if (t->to_info_record != NULL)
4107 {
4108 t->to_info_record (t);
4109 return;
4110 }
4111
4112 tcomplain ();
4113 }
4114
4115 /* See target.h. */
4116
4117 void
4118 target_save_record (const char *filename)
4119 {
4120 struct target_ops *t;
4121
4122 for (t = current_target.beneath; t != NULL; t = t->beneath)
4123 if (t->to_save_record != NULL)
4124 {
4125 t->to_save_record (t, filename);
4126 return;
4127 }
4128
4129 tcomplain ();
4130 }
4131
4132 /* See target.h. */
4133
4134 int
4135 target_supports_delete_record (void)
4136 {
4137 struct target_ops *t;
4138
4139 for (t = current_target.beneath; t != NULL; t = t->beneath)
4140 if (t->to_delete_record != NULL)
4141 return 1;
4142
4143 return 0;
4144 }
4145
4146 /* See target.h. */
4147
4148 void
4149 target_delete_record (void)
4150 {
4151 struct target_ops *t;
4152
4153 for (t = current_target.beneath; t != NULL; t = t->beneath)
4154 if (t->to_delete_record != NULL)
4155 {
4156 t->to_delete_record (t);
4157 return;
4158 }
4159
4160 tcomplain ();
4161 }
4162
4163 /* See target.h. */
4164
4165 int
4166 target_record_is_replaying (void)
4167 {
4168 struct target_ops *t;
4169
4170 for (t = current_target.beneath; t != NULL; t = t->beneath)
4171 if (t->to_record_is_replaying != NULL)
4172 return t->to_record_is_replaying (t);
4173
4174 return 0;
4175 }
4176
4177 /* See target.h. */
4178
4179 void
4180 target_goto_record_begin (void)
4181 {
4182 struct target_ops *t;
4183
4184 for (t = current_target.beneath; t != NULL; t = t->beneath)
4185 if (t->to_goto_record_begin != NULL)
4186 {
4187 t->to_goto_record_begin (t);
4188 return;
4189 }
4190
4191 tcomplain ();
4192 }
4193
4194 /* See target.h. */
4195
4196 void
4197 target_goto_record_end (void)
4198 {
4199 struct target_ops *t;
4200
4201 for (t = current_target.beneath; t != NULL; t = t->beneath)
4202 if (t->to_goto_record_end != NULL)
4203 {
4204 t->to_goto_record_end (t);
4205 return;
4206 }
4207
4208 tcomplain ();
4209 }
4210
4211 /* See target.h. */
4212
4213 void
4214 target_goto_record (ULONGEST insn)
4215 {
4216 struct target_ops *t;
4217
4218 for (t = current_target.beneath; t != NULL; t = t->beneath)
4219 if (t->to_goto_record != NULL)
4220 {
4221 t->to_goto_record (t, insn);
4222 return;
4223 }
4224
4225 tcomplain ();
4226 }
4227
4228 /* See target.h. */
4229
4230 void
4231 target_insn_history (int size, int flags)
4232 {
4233 struct target_ops *t;
4234
4235 for (t = current_target.beneath; t != NULL; t = t->beneath)
4236 if (t->to_insn_history != NULL)
4237 {
4238 t->to_insn_history (t, size, flags);
4239 return;
4240 }
4241
4242 tcomplain ();
4243 }
4244
4245 /* See target.h. */
4246
4247 void
4248 target_insn_history_from (ULONGEST from, int size, int flags)
4249 {
4250 struct target_ops *t;
4251
4252 for (t = current_target.beneath; t != NULL; t = t->beneath)
4253 if (t->to_insn_history_from != NULL)
4254 {
4255 t->to_insn_history_from (t, from, size, flags);
4256 return;
4257 }
4258
4259 tcomplain ();
4260 }
4261
4262 /* See target.h. */
4263
4264 void
4265 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4266 {
4267 struct target_ops *t;
4268
4269 for (t = current_target.beneath; t != NULL; t = t->beneath)
4270 if (t->to_insn_history_range != NULL)
4271 {
4272 t->to_insn_history_range (t, begin, end, flags);
4273 return;
4274 }
4275
4276 tcomplain ();
4277 }
4278
4279 /* See target.h. */
4280
4281 void
4282 target_call_history (int size, int flags)
4283 {
4284 struct target_ops *t;
4285
4286 for (t = current_target.beneath; t != NULL; t = t->beneath)
4287 if (t->to_call_history != NULL)
4288 {
4289 t->to_call_history (t, size, flags);
4290 return;
4291 }
4292
4293 tcomplain ();
4294 }
4295
4296 /* See target.h. */
4297
4298 void
4299 target_call_history_from (ULONGEST begin, int size, int flags)
4300 {
4301 struct target_ops *t;
4302
4303 for (t = current_target.beneath; t != NULL; t = t->beneath)
4304 if (t->to_call_history_from != NULL)
4305 {
4306 t->to_call_history_from (t, begin, size, flags);
4307 return;
4308 }
4309
4310 tcomplain ();
4311 }
4312
4313 /* See target.h. */
4314
4315 void
4316 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4317 {
4318 struct target_ops *t;
4319
4320 for (t = current_target.beneath; t != NULL; t = t->beneath)
4321 if (t->to_call_history_range != NULL)
4322 {
4323 t->to_call_history_range (t, begin, end, flags);
4324 return;
4325 }
4326
4327 tcomplain ();
4328 }
4329
/* Debug-logging wrapper for the "prepare_to_store" target method.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4337
4338 /* See target.h. */
4339
4340 const struct frame_unwind *
4341 target_get_unwinder (void)
4342 {
4343 struct target_ops *t;
4344
4345 for (t = current_target.beneath; t != NULL; t = t->beneath)
4346 if (t->to_get_unwinder != NULL)
4347 return t->to_get_unwinder;
4348
4349 return NULL;
4350 }
4351
4352 /* See target.h. */
4353
4354 const struct frame_unwind *
4355 target_get_tailcall_unwinder (void)
4356 {
4357 struct target_ops *t;
4358
4359 for (t = current_target.beneath; t != NULL; t = t->beneath)
4360 if (t->to_get_tailcall_unwinder != NULL)
4361 return t->to_get_tailcall_unwinder;
4362
4363 return NULL;
4364 }
4365
4366 /* See target.h. */
4367
4368 CORE_ADDR
4369 forward_target_decr_pc_after_break (struct target_ops *ops,
4370 struct gdbarch *gdbarch)
4371 {
4372 for (; ops != NULL; ops = ops->beneath)
4373 if (ops->to_decr_pc_after_break != NULL)
4374 return ops->to_decr_pc_after_break (ops, gdbarch);
4375
4376 return gdbarch_decr_pc_after_break (gdbarch);
4377 }
4378
4379 /* See target.h. */
4380
/* Convenience wrapper starting the search at the top of the current
   target stack.  */
CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4386
/* Debug-logging wrapper for the deprecated_xfer_memory method:
   forward the transfer, then log the call, the result, and (on a
   successful transfer) a hex dump of the bytes.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line at 16-byte host-address boundaries.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Unless "set debug target 2" or higher is in effect,
		 elide everything past the first line of the dump.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4427
/* Debug-logging wrapper for the "files_info" target method.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4435
/* Debug-logging wrapper for the "insert_breakpoint" target method:
   forward to the real implementation, then log address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4450
/* Debug-logging wrapper for the "remove_breakpoint" target method:
   forward to the real implementation, then log address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4465
4466 static int
4467 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4468 int type, int cnt, int from_tty)
4469 {
4470 int retval;
4471
4472 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4473 type, cnt, from_tty);
4474
4475 fprintf_unfiltered (gdb_stdlog,
4476 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4477 (unsigned long) type,
4478 (unsigned long) cnt,
4479 (unsigned long) from_tty,
4480 (unsigned long) retval);
4481 return retval;
4482 }
4483
4484 static int
4485 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4486 CORE_ADDR addr, int len)
4487 {
4488 CORE_ADDR retval;
4489
4490 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4491 addr, len);
4492
4493 fprintf_unfiltered (gdb_stdlog,
4494 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4495 core_addr_to_string (addr), (unsigned long) len,
4496 core_addr_to_string (retval));
4497 return retval;
4498 }
4499
4500 static int
4501 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4502 CORE_ADDR addr, int len, int rw,
4503 struct expression *cond)
4504 {
4505 int retval;
4506
4507 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4508 addr, len,
4509 rw, cond);
4510
4511 fprintf_unfiltered (gdb_stdlog,
4512 "target_can_accel_watchpoint_condition "
4513 "(%s, %d, %d, %s) = %ld\n",
4514 core_addr_to_string (addr), len, rw,
4515 host_address_to_string (cond), (unsigned long) retval);
4516 return retval;
4517 }
4518
4519 static int
4520 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4521 {
4522 int retval;
4523
4524 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4525
4526 fprintf_unfiltered (gdb_stdlog,
4527 "target_stopped_by_watchpoint () = %ld\n",
4528 (unsigned long) retval);
4529 return retval;
4530 }
4531
4532 static int
4533 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4534 {
4535 int retval;
4536
4537 retval = debug_target.to_stopped_data_address (target, addr);
4538
4539 fprintf_unfiltered (gdb_stdlog,
4540 "target_stopped_data_address ([%s]) = %ld\n",
4541 core_addr_to_string (*addr),
4542 (unsigned long)retval);
4543 return retval;
4544 }
4545
4546 static int
4547 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4548 CORE_ADDR addr,
4549 CORE_ADDR start, int length)
4550 {
4551 int retval;
4552
4553 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4554 start, length);
4555
4556 fprintf_filtered (gdb_stdlog,
4557 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4558 core_addr_to_string (addr), core_addr_to_string (start),
4559 length, retval);
4560 return retval;
4561 }
4562
4563 static int
4564 debug_to_insert_hw_breakpoint (struct target_ops *self,
4565 struct gdbarch *gdbarch,
4566 struct bp_target_info *bp_tgt)
4567 {
4568 int retval;
4569
4570 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4571 gdbarch, bp_tgt);
4572
4573 fprintf_unfiltered (gdb_stdlog,
4574 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4575 core_addr_to_string (bp_tgt->placed_address),
4576 (unsigned long) retval);
4577 return retval;
4578 }
4579
4580 static int
4581 debug_to_remove_hw_breakpoint (struct target_ops *self,
4582 struct gdbarch *gdbarch,
4583 struct bp_target_info *bp_tgt)
4584 {
4585 int retval;
4586
4587 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4588 gdbarch, bp_tgt);
4589
4590 fprintf_unfiltered (gdb_stdlog,
4591 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4592 core_addr_to_string (bp_tgt->placed_address),
4593 (unsigned long) retval);
4594 return retval;
4595 }
4596
4597 static int
4598 debug_to_insert_watchpoint (struct target_ops *self,
4599 CORE_ADDR addr, int len, int type,
4600 struct expression *cond)
4601 {
4602 int retval;
4603
4604 retval = debug_target.to_insert_watchpoint (&debug_target,
4605 addr, len, type, cond);
4606
4607 fprintf_unfiltered (gdb_stdlog,
4608 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4609 core_addr_to_string (addr), len, type,
4610 host_address_to_string (cond), (unsigned long) retval);
4611 return retval;
4612 }
4613
4614 static int
4615 debug_to_remove_watchpoint (struct target_ops *self,
4616 CORE_ADDR addr, int len, int type,
4617 struct expression *cond)
4618 {
4619 int retval;
4620
4621 retval = debug_target.to_remove_watchpoint (&debug_target,
4622 addr, len, type, cond);
4623
4624 fprintf_unfiltered (gdb_stdlog,
4625 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4626 core_addr_to_string (addr), len, type,
4627 host_address_to_string (cond), (unsigned long) retval);
4628 return retval;
4629 }
4630
/* "set debug target" wrapper around to_terminal_init: delegate to the
   saved real target vector, then log the call.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4638
/* "set debug target" wrapper around to_terminal_inferior: delegate to
   the saved real target vector, then log the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4646
/* "set debug target" wrapper around to_terminal_ours_for_output:
   delegate to the saved real target vector, then log the call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4654
/* "set debug target" wrapper around to_terminal_ours: delegate to the
   saved real target vector, then log the call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4662
/* "set debug target" wrapper around to_terminal_save_ours: delegate to
   the saved real target vector, then log the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4670
4671 static void
4672 debug_to_terminal_info (struct target_ops *self,
4673 const char *arg, int from_tty)
4674 {
4675 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4676
4677 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4678 from_tty);
4679 }
4680
/* "set debug target" wrapper around to_load: delegate to the saved
   real target vector, then log the arguments.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4688
/* "set debug target" wrapper around to_post_startup_inferior: delegate
   to the saved real target vector, then log the pid of PTID.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4697
4698 static int
4699 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4700 {
4701 int retval;
4702
4703 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4704
4705 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4706 pid, retval);
4707
4708 return retval;
4709 }
4710
4711 static int
4712 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4713 {
4714 int retval;
4715
4716 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4717
4718 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4719 pid, retval);
4720
4721 return retval;
4722 }
4723
4724 static int
4725 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4726 {
4727 int retval;
4728
4729 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4730
4731 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4732 pid, retval);
4733
4734 return retval;
4735 }
4736
4737 static int
4738 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4739 {
4740 int retval;
4741
4742 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4743
4744 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4745 pid, retval);
4746
4747 return retval;
4748 }
4749
4750 static int
4751 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4752 {
4753 int retval;
4754
4755 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4756
4757 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4758 pid, retval);
4759
4760 return retval;
4761 }
4762
4763 static int
4764 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4765 {
4766 int retval;
4767
4768 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4769
4770 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4771 pid, retval);
4772
4773 return retval;
4774 }
4775
/* "set debug target" wrapper around to_has_exited: delegate to the
   saved real target vector, then log the arguments and result.
   NOTE(review): *EXIT_STATUS is read unconditionally for the log line;
   presumably the callee stores it even when returning 0 -- confirm,
   otherwise this prints an indeterminate value.  */

static int
debug_to_has_exited (struct target_ops *self,
		     int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (&debug_target,
					   pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
4790
4791 static int
4792 debug_to_can_run (struct target_ops *self)
4793 {
4794 int retval;
4795
4796 retval = debug_target.to_can_run (&debug_target);
4797
4798 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4799
4800 return retval;
4801 }
4802
/* "set debug target" wrapper around to_thread_architecture: delegate
   to the real target (note: forwards OPS, not &debug_target), then log
   the ptid and the resulting architecture.  NOTE(review): the result
   is passed to gdbarch_bfd_arch_info unconditionally, so this assumes
   the callee never returns NULL -- confirm.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4817
/* "set debug target" wrapper around to_stop: delegate to the saved
   real target vector, then log the ptid.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4826
/* "set debug target" wrapper around to_rcmd: delegate to the saved
   real target vector, then log the monitor command string.  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4834
4835 static char *
4836 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4837 {
4838 char *exec_file;
4839
4840 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4841
4842 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4843 pid, exec_file);
4844
4845 return exec_file;
4846 }
4847
/* Install the "set debug target" logging wrappers: snapshot the
   current target vector into DEBUG_TARGET, then redirect the methods
   of CURRENT_TARGET to the debug_to_* wrappers above, each of which
   delegates to the snapshot and logs the call to gdb_stdlog.  Methods
   not reassigned here keep their original (unlogged) implementation.  */

static void
setup_target_debug (void)
{
  /* The snapshot must be taken first: the wrappers call through
     debug_target, so it has to hold the pre-wrapper function
     pointers.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4895 \f
4896
/* Help text shared by the "info target" and "info files" commands
   registered in initialize_targets below.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4901
/* Default to_rcmd implementation: the current target stack has no
   remote monitor, so reject the "monitor" command.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4907
4908 static void
4909 do_monitor_command (char *cmd,
4910 int from_tty)
4911 {
4912 target_rcmd (cmd, gdb_stdtarg);
4913 }
4914
4915 /* Print the name of each layers of our target stack. */
4916
4917 static void
4918 maintenance_print_target_stack (char *cmd, int from_tty)
4919 {
4920 struct target_ops *t;
4921
4922 printf_filtered (_("The current target stack is:\n"));
4923
4924 for (t = target_stack; t != NULL; t = t->beneath)
4925 {
4926 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4927 }
4928 }
4929
/* Controls if async mode is permitted.  Read by the rest of GDB to
   decide whether the target may run asynchronously.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the two are
   synced by set_target_async_command below.  */
static int target_async_permitted_1 = 0;
4936
/* "set target-async" callback: commit the user-edited shadow value
   into target_async_permitted, unless the inferior is running.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Revert the shadow variable so "show" reflects the real value;
	 error() does not return, so the assignment below is skipped.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4949
/* "show target-async" callback: print the current setting VALUE.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4959
/* Temporary copies of permission settings.  The set/show commands in
   initialize_targets write these shadows; set_target_permissions and
   set_write_memory_permission copy them into the real may_* flags, and
   update_target_permissions copies the real flags back.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4968
/* Make the user-set values match the real values again, e.g. after a
   rejected "set" while the inferior was running.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4981
/* The one function handles (most of) the permission flags in the same
   way: refuse the change while the inferior runs, otherwise commit the
   user-edited shadow values into the real flags.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Undo the user's edit of the shadow variable; error() does not
	 return, so the assignments below are skipped.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.
     may_write_memory is intentionally absent here: it is committed by
     set_write_memory_permission, independently of observer mode.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5003
/* Set memory write permission independently of observer mode.  Unlike
   the flags handled by set_target_permissions, this one may be changed
   even while the inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5014
5015
/* Module initialization: install the dummy target as the base of the
   target stack and register this file's commands -- "info target"/
   "info files", the target debugging and trust-readonly settings, the
   "monitor" command, "maint print target-stack", "set target-async",
   and the set/show commands for the may-* permission flags.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a target to fall back to.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The permission commands below all write shadow variables; see
     set_target_permissions / set_write_memory_permission for how they
     are committed to the real flags.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}