convert to_set_disconnected_tracing
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
61 long lwp, long tid);
62
63 static void tcomplain (void) ATTRIBUTE_NORETURN;
64
65 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
66
67 static int return_zero (void);
68
69 static void *return_null (void);
70
71 void target_ignore (void);
72
73 static void target_command (char *, int);
74
75 static struct target_ops *find_default_run_target (char *);
76
77 static target_xfer_partial_ftype default_xfer_partial;
78
79 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
80 ptid_t ptid);
81
82 static int dummy_find_memory_regions (struct target_ops *self,
83 find_memory_region_ftype ignore1,
84 void *ignore2);
85
86 static char *dummy_make_corefile_notes (struct target_ops *self,
87 bfd *ignore1, int *ignore2);
88
89 static int find_default_can_async_p (struct target_ops *ignore);
90
91 static int find_default_is_async_p (struct target_ops *ignore);
92
93 static enum exec_direction_kind default_execution_direction
94 (struct target_ops *self);
95
96 #include "target-delegates.c"
97
98 static void init_dummy_target (void);
99
100 static struct target_ops debug_target;
101
102 static void debug_to_open (char *, int);
103
104 static void debug_to_prepare_to_store (struct target_ops *self,
105 struct regcache *);
106
107 static void debug_to_files_info (struct target_ops *);
108
109 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
116 int, int, int);
117
118 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
119 struct gdbarch *,
120 struct bp_target_info *);
121
122 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
123 struct gdbarch *,
124 struct bp_target_info *);
125
126 static int debug_to_insert_watchpoint (struct target_ops *self,
127 CORE_ADDR, int, int,
128 struct expression *);
129
130 static int debug_to_remove_watchpoint (struct target_ops *self,
131 CORE_ADDR, int, int,
132 struct expression *);
133
134 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
135
136 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
137 CORE_ADDR, CORE_ADDR, int);
138
139 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
140 CORE_ADDR, int);
141
142 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
143 CORE_ADDR, int, int,
144 struct expression *);
145
146 static void debug_to_terminal_init (struct target_ops *self);
147
148 static void debug_to_terminal_inferior (struct target_ops *self);
149
150 static void debug_to_terminal_ours_for_output (struct target_ops *self);
151
152 static void debug_to_terminal_save_ours (struct target_ops *self);
153
154 static void debug_to_terminal_ours (struct target_ops *self);
155
156 static void debug_to_load (struct target_ops *self, char *, int);
157
158 static int debug_to_can_run (struct target_ops *self);
159
160 static void debug_to_stop (struct target_ops *self, ptid_t);
161
162 /* Pointer to array of target architecture structures; the size of the
163 array; the current index into the array; the allocated size of the
164 array. */
165 struct target_ops **target_structs;
166 unsigned target_struct_size;
167 unsigned target_struct_allocsize;
168 #define DEFAULT_ALLOCSIZE 10
169
170 /* The initial current target, so that there is always a semi-valid
171 current target. */
172
173 static struct target_ops dummy_target;
174
175 /* Top of target stack. */
176
177 static struct target_ops *target_stack;
178
179 /* The target structure we are currently using to talk to a process
180 or file or whatever "inferior" we have. */
181
182 struct target_ops current_target;
183
184 /* Command list for target. */
185
186 static struct cmd_list_element *targetlist = NULL;
187
188 /* Nonzero if we should trust readonly sections from the
189 executable when reading memory. */
190
191 static int trust_readonly = 0;
192
193 /* Nonzero if we should show true memory content including
194 memory breakpoint inserted by gdb. */
195
196 static int show_memory_breakpoints = 0;
197
198 /* These globals control whether GDB attempts to perform these
199 operations; they are useful for targets that need to prevent
200 inadvertant disruption, such as in non-stop mode. */
201
202 int may_write_registers = 1;
203
204 int may_write_memory = 1;
205
206 int may_insert_breakpoints = 1;
207
208 int may_insert_tracepoints = 1;
209
210 int may_insert_fast_tracepoints = 1;
211
212 int may_stop = 1;
213
214 /* Non-zero if we want to see trace of target level stuff. */
215
216 static unsigned int targetdebug = 0;
/* Callback for "show debug target"; prints the current value of the
   "targetdebug" setting (VALUE is its string form).  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
223
224 static void setup_target_debug (void);
225
226 /* The user just typed 'target' without the name of a target. */
227
static void
target_command (char *arg, int from_tty)
{
  /* Bare "target" is an error; point the user at the help text.
     ARG and FROM_TTY are ignored.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
                  gdb_stdout);
}
234
235 /* Default target_has_* methods for process_stratum targets. */
236
237 int
238 default_child_has_all_memory (struct target_ops *ops)
239 {
240 /* If no inferior selected, then we can't read memory here. */
241 if (ptid_equal (inferior_ptid, null_ptid))
242 return 0;
243
244 return 1;
245 }
246
247 int
248 default_child_has_memory (struct target_ops *ops)
249 {
250 /* If no inferior selected, then we can't read memory here. */
251 if (ptid_equal (inferior_ptid, null_ptid))
252 return 0;
253
254 return 1;
255 }
256
257 int
258 default_child_has_stack (struct target_ops *ops)
259 {
260 /* If no inferior selected, there's no stack. */
261 if (ptid_equal (inferior_ptid, null_ptid))
262 return 0;
263
264 return 1;
265 }
266
267 int
268 default_child_has_registers (struct target_ops *ops)
269 {
270 /* Can't read registers from no inferior. */
271 if (ptid_equal (inferior_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277 int
278 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
279 {
280 /* If there's no thread selected, then we can't make it run through
281 hoops. */
282 if (ptid_equal (the_ptid, null_ptid))
283 return 0;
284
285 return 1;
286 }
287
288
289 int
290 target_has_all_memory_1 (void)
291 {
292 struct target_ops *t;
293
294 for (t = current_target.beneath; t != NULL; t = t->beneath)
295 if (t->to_has_all_memory (t))
296 return 1;
297
298 return 0;
299 }
300
301 int
302 target_has_memory_1 (void)
303 {
304 struct target_ops *t;
305
306 for (t = current_target.beneath; t != NULL; t = t->beneath)
307 if (t->to_has_memory (t))
308 return 1;
309
310 return 0;
311 }
312
313 int
314 target_has_stack_1 (void)
315 {
316 struct target_ops *t;
317
318 for (t = current_target.beneath; t != NULL; t = t->beneath)
319 if (t->to_has_stack (t))
320 return 1;
321
322 return 0;
323 }
324
325 int
326 target_has_registers_1 (void)
327 {
328 struct target_ops *t;
329
330 for (t = current_target.beneath; t != NULL; t = t->beneath)
331 if (t->to_has_registers (t))
332 return 1;
333
334 return 0;
335 }
336
337 int
338 target_has_execution_1 (ptid_t the_ptid)
339 {
340 struct target_ops *t;
341
342 for (t = current_target.beneath; t != NULL; t = t->beneath)
343 if (t->to_has_execution (t, the_ptid))
344 return 1;
345
346 return 0;
347 }
348
/* Like target_has_execution_1, but for the currently selected
   thread (inferior_ptid).  */
int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
354
355 /* Complete initialization of T. This ensures that various fields in
356 T are set, if needed by the target implementation. */
357
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): return_zero is declared as taking no arguments but
     is called through these casted prototypes; long-standing practice
     in this file, though formally a mismatched function-pointer
     call -- confirm before tightening.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every remaining unset method with its delegator.  */
  install_delegators (t);
}
382
383 /* Add possible target architecture T to the list and add a new
384 command 'target T->to_shortname'. Set COMPLETER as the command's
385 completer if not NULL. */
386
void
add_target_with_completer (struct target_ops *t,
                           completer_ftype *completer)
{
  struct cmd_list_element *c;

  complete_target_initialization (t);

  /* Record T in the global target registry, growing the array
     (doubling) as needed.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
        (target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
        xrealloc ((char *) target_structs,
                  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
                    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
               &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
423
424 /* Add a possible target architecture to the list. */
425
void
add_target (struct target_ops *t)
{
  /* Same as add_target_with_completer, with no command completer.  */
  add_target_with_completer (t, NULL);
}
431
432 /* See target.h. */
433
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* NOTE(review): ALT is not freed here; deprecate_cmd appears to
     take ownership of the replacement string -- confirm against
     cli/cli-decode.c before treating this as a leak.  */
  deprecate_cmd (c, alt);
}
446
447 /* Stub functions */
448
/* Deliberate no-op, used (via casts) as a default for target methods
   whose absence is harmless.  */
void
target_ignore (void)
{
}
453
454 void
455 target_kill (void)
456 {
457 struct target_ops *t;
458
459 for (t = current_target.beneath; t != NULL; t = t->beneath)
460 if (t->to_kill != NULL)
461 {
462 if (targetdebug)
463 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
464
465 t->to_kill (t);
466 return;
467 }
468
469 noprocess ();
470 }
471
/* Load a program into the inferior.  Invalidates the target dcache
   first, since loading rewrites target memory.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
478
479 void
480 target_create_inferior (char *exec_file, char *args,
481 char **env, int from_tty)
482 {
483 struct target_ops *t;
484
485 for (t = current_target.beneath; t != NULL; t = t->beneath)
486 {
487 if (t->to_create_inferior != NULL)
488 {
489 t->to_create_inferior (t, exec_file, args, env, from_tty);
490 if (targetdebug)
491 fprintf_unfiltered (gdb_stdlog,
492 "target_create_inferior (%s, %s, xxx, %d)\n",
493 exec_file, args, from_tty);
494 return;
495 }
496 }
497
498 internal_error (__FILE__, __LINE__,
499 _("could not find a target to create inferior"));
500 }
501
/* Hand the terminal over to the inferior when it is resumed in the
   foreground; a background resume leaves GDB in control.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal. Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet. However, if sync_execution
     is not set, we know it will become async prior to resume. */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes. */
  (*current_target.to_terminal_inferior) (&current_target);
}
516
/* Default memory-transfer stub: refuse every request.  Matches the
   deprecated_xfer_memory signature (via cast); the unused parameters
   are required by that interface.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
          struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
524
/* Complain that the current target cannot do the requested operation.
   Throws an error; does not return (ATTRIBUTE_NORETURN).  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
         current_target.to_shortname);
}
531
/* Throw an error reporting that no process is being debugged.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
537
/* Default for to_terminal_info: there is no saved terminal state to
   report.  ARGS and FROM_TTY are ignored.  */
static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
543
544 /* A default implementation for the to_get_ada_task_ptid target method.
545
546 This function builds the PTID by using both LWP and TID as part of
547 the PTID lwp and tid elements. The pid used is the pid of the
548 inferior_ptid. */
549
static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  /* Reuse the current inferior's pid; only the LWP and TID fields
     vary per Ada task.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
555
556 static enum exec_direction_kind
557 default_execution_direction (struct target_ops *self)
558 {
559 if (!target_can_execute_reverse)
560 return EXEC_FORWARD;
561 else if (!target_can_async_p ())
562 return EXEC_FORWARD;
563 else
564 gdb_assert_not_reached ("\
565 to_execution_direction must be implemented for reverse async");
566 }
567
568 /* Go through the target stack from top to bottom, copying over zero
569 entries in current_target, then filling in still empty entries. In
570 effect, we are doing class inheritance through the pushed target
571 vectors.
572
573 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
574 is currently implemented, is that it discards any knowledge of
575 which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
577 locally search the target stack for the target that can handle the
578 request. */
579
/* Rebuild CURRENT_TARGET from the target stack: reset it, install the
   delegators, inherit a small set of plain-data fields from the
   topmost target that provides each, then default any methods that
   are still NULL.  Must be called whenever the stack changes.  */
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from TARGET into current_target if not yet set; used
     only for fields not handled by the delegator mechanism.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      /* Do not inherit to_pid_to_exec_file.  */
      /* Do not inherit to_log_command.  */
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      /* Do not inherit to_find_memory_regions.  */
      /* Do not inherit to_make_corefile_notes.  */
      /* Do not inherit to_get_bookmark.  */
      /* Do not inherit to_goto_bookmark.  */
      /* Do not inherit to_get_thread_local_address.  */
      /* Do not inherit to_can_execute_reverse.  */
      /* Do not inherit to_execution_direction.  */
      /* Do not inherit to_thread_architecture.  */
      /* Do not inherit to_read_description.  */
      /* Do not inherit to_get_ada_task_ptid.  */
      /* Do not inherit to_search_memory.  */
      /* Do not inherit to_supports_multi_process.  */
      /* Do not inherit to_supports_enable_disable_tracepoint.  */
      /* Do not inherit to_supports_string_tracing.  */
      /* Do not inherit to_trace_init.  */
      /* Do not inherit to_download_tracepoint.  */
      /* Do not inherit to_can_download_tracepoint.  */
      /* Do not inherit to_download_trace_state_variable.  */
      /* Do not inherit to_enable_tracepoint.  */
      /* Do not inherit to_disable_tracepoint.  */
      /* Do not inherit to_trace_set_readonly_regions.  */
      /* Do not inherit to_trace_start.  */
      /* Do not inherit to_get_trace_status.  */
      /* Do not inherit to_get_tracepoint_status.  */
      /* Do not inherit to_trace_stop.  */
      /* Do not inherit to_trace_find.  */
      /* Do not inherit to_get_trace_state_variable_value.  */
      /* Do not inherit to_save_trace_data.  */
      /* Do not inherit to_upload_tracepoints.  */
      /* Do not inherit to_upload_trace_state_variables.  */
      /* Do not inherit to_get_raw_trace_data.  */
      /* Do not inherit to_get_min_fast_tracepoint_insn_len.  */
      /* Do not inherit to_set_disconnected_tracing.  */
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  current_target.to_read_description = NULL;
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
806
807 /* Push a new target type into the stack of the existing target accessors,
808 possibly superseding some of the existing accessors.
809
810 Rather than allow an empty stack, we always have the dummy target at
811 the bottom stratum, so we can call the function vectors without
812 checking them. */
813
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number. If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one. */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
                          "Magic number of %s target struct wrong\n",
                          t->to_shortname);
      internal_error (__FILE__, __LINE__,
                      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR ends up
     pointing at the link where T must be spliced in.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
        break;
    }

  /* If there's already targets at this stratum, remove them. */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level. */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level. Close it,
         and un-hook it from the stack. */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one. */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-merge current_target to reflect the changed stack.  */
  update_current_target ();
}
857
858 /* Remove a target_ops vector from the stack, wherever it may be.
859 Return how many times it was removed (0 or 1). */
860
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
                    _("Attempt to unpush the dummy target"));

  /* Look for the specified target. Note that we assume that a target
     can only occur once in the target stack. */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
        break;
    }

  /* If we don't find target_ops, quit. Only open targets should be
     closed. */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target. Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore. */
  target_close (t);

  return 1;
}
899
/* Unpush every target whose stratum is strictly above ABOVE_STRATUM.
   The topmost pushed target always matches current_target.to_stratum,
   so popping target_stack repeatedly drains the stack top-down.  */
void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
        {
          fprintf_unfiltered (gdb_stderr,
                              "pop_all_targets couldn't find target %s\n",
                              target_stack->to_shortname);
          internal_error (__FILE__, __LINE__,
                          _("failed internal consistency check"));
          break;
        }
    }
}
916
/* Unpush every target except the permanent dummy target.  */
void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
922
923 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
924
925 int
926 target_is_pushed (struct target_ops *t)
927 {
928 struct target_ops **cur;
929
930 /* Check magic number. If wrong, it probably means someone changed
931 the struct definition, but not all the places that initialize one. */
932 if (t->to_magic != OPS_MAGIC)
933 {
934 fprintf_unfiltered (gdb_stderr,
935 "Magic number of %s target struct wrong\n",
936 t->to_shortname);
937 internal_error (__FILE__, __LINE__,
938 _("failed internal consistency check"));
939 }
940
941 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
942 if (*cur == t)
943 return 1;
944
945 return 0;
946 }
947
948 /* Using the objfile specified in OBJFILE, find the address for the
949 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile because it is written inside TRY_CATCH and read after.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target that knows about thread-local storage.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
        break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
        {
          CORE_ADDR lm_addr;

          /* Fetch the load module address for this objfile. */
          lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
                                                           objfile);
          /* If it's 0, throw the appropriate exception. */
          if (lm_addr == 0)
            throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
                         _("TLS load module not found"));

          addr = target->to_get_thread_local_address (target, ptid,
                                                      lm_addr, offset);
        }
      /* If an error occurred, print TLS related messages here. Otherwise,
         throw the error to some higher catcher. */
      if (ex.reason < 0)
        {
          int objfile_is_library = (objfile->flags & OBJF_SHARED);

          switch (ex.error)
            {
            case TLS_NO_LIBRARY_SUPPORT_ERROR:
              error (_("Cannot find thread-local variables "
                       "in this thread library."));
              break;
            case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
              if (objfile_is_library)
                error (_("Cannot find shared library `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              else
                error (_("Cannot find executable file `%s' in dynamic"
                         " linker's load module list"), objfile_name (objfile));
              break;
            case TLS_NOT_ALLOCATED_YET_ERROR:
              if (objfile_is_library)
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the shared library `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              else
                error (_("The inferior has not yet allocated storage for"
                         " thread-local variables in\n"
                         "the executable `%s'\n"
                         "for %s"),
                       objfile_name (objfile), target_pid_to_str (ptid));
              break;
            case TLS_GENERIC_ERROR:
              if (objfile_is_library)
                error (_("Cannot find thread-local storage for %s, "
                         "shared library %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              else
                error (_("Cannot find thread-local storage for %s, "
                         "executable file %s:\n%s"),
                       target_pid_to_str (ptid),
                       objfile_name (objfile), ex.message);
              break;
            default:
              throw_exception (ex);
              break;
            }
        }
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing. But we don't do that yet. */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1044
1045 const char *
1046 target_xfer_status_to_string (enum target_xfer_status err)
1047 {
1048 #define CASE(X) case X: return #X
1049 switch (err)
1050 {
1051 CASE(TARGET_XFER_E_IO);
1052 CASE(TARGET_XFER_E_UNAVAILABLE);
1053 default:
1054 return "<unknown>";
1055 }
1056 #undef CASE
1057 };
1058
1059
#undef MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read in 4-byte aligned chunks; TLEN is how many of the bytes
	 in BUF belong to the string, OFFSET is where they start.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the output buffer geometrically when the next chunk would
	 not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping at (and counting) the terminating
	 NUL if one appears within this chunk.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* BUFFER is always returned, even on error; the caller frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1135
1136 struct target_section_table *
1137 target_get_section_table (struct target_ops *target)
1138 {
1139 struct target_ops *t;
1140
1141 if (targetdebug)
1142 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1143
1144 for (t = target; t != NULL; t = t->beneath)
1145 if (t->to_get_section_table != NULL)
1146 return (*t->to_get_section_table) (t);
1147
1148 return NULL;
1149 }
1150
1151 /* Find a section containing ADDR. */
1152
1153 struct target_section *
1154 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1155 {
1156 struct target_section_table *table = target_get_section_table (target);
1157 struct target_section *secp;
1158
1159 if (table == NULL)
1160 return NULL;
1161
1162 for (secp = table->sections; secp < table->sections_end; secp++)
1163 {
1164 if (addr >= secp->addr && addr < secp->endaddr)
1165 return secp;
1166 }
1167 return NULL;
1168 }
1169
1170 /* Read memory from the live target, even if currently inspecting a
1171 traceframe. The return is the same as that of target_read. */
1172
1173 static enum target_xfer_status
1174 target_read_live_memory (enum target_object object,
1175 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1176 ULONGEST *xfered_len)
1177 {
1178 enum target_xfer_status ret;
1179 struct cleanup *cleanup;
1180
1181 /* Switch momentarily out of tfind mode so to access live memory.
1182 Note that this must not clear global state, such as the frame
1183 cache, which must still remain valid for the previous traceframe.
1184 We may be _building_ the frame cache at this point. */
1185 cleanup = make_cleanup_restore_traceframe_number ();
1186 set_traceframe_number (-1);
1187
1188 ret = target_xfer_partial (current_target.beneath, object, NULL,
1189 myaddr, NULL, memaddr, len, xfered_len);
1190
1191 do_cleanups (cleanup);
1192 return ret;
1193 }
1194
1195 /* Using the set of read-only target sections of OPS, read live
1196 read-only memory. Note that the actual reads start from the
1197 top-most target again.
1198
1199 For interface/parameters/return description see target.h,
1200 to_xfer_partial. */
1201
1202 static enum target_xfer_status
1203 memory_xfer_live_readonly_partial (struct target_ops *ops,
1204 enum target_object object,
1205 gdb_byte *readbuf, ULONGEST memaddr,
1206 ULONGEST len, ULONGEST *xfered_len)
1207 {
1208 struct target_section *secp;
1209 struct target_section_table *table;
1210
1211 secp = target_section_by_addr (ops, memaddr);
1212 if (secp != NULL
1213 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1214 secp->the_bfd_section)
1215 & SEC_READONLY))
1216 {
1217 struct target_section *p;
1218 ULONGEST memend = memaddr + len;
1219
1220 table = target_get_section_table (ops);
1221
1222 for (p = table->sections; p < table->sections_end; p++)
1223 {
1224 if (memaddr >= p->addr)
1225 {
1226 if (memend <= p->endaddr)
1227 {
1228 /* Entire transfer is within this section. */
1229 return target_read_live_memory (object, memaddr,
1230 readbuf, len, xfered_len);
1231 }
1232 else if (memaddr >= p->endaddr)
1233 {
1234 /* This section ends before the transfer starts. */
1235 continue;
1236 }
1237 else
1238 {
1239 /* This section overlaps the transfer. Just do half. */
1240 len = p->endaddr - memaddr;
1241 return target_read_live_memory (object, memaddr,
1242 readbuf, len, xfered_len);
1243 }
1244 }
1245 }
1246 }
1247
1248 return TARGET_XFER_EOF;
1249 }
1250
1251 /* Read memory from more than one valid target. A core file, for
1252 instance, could have some of memory but delegate other bits to
1253 the target below it. So, we must manually try all targets. */
1254
1255 static enum target_xfer_status
1256 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1257 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1258 ULONGEST *xfered_len)
1259 {
1260 enum target_xfer_status res;
1261
1262 do
1263 {
1264 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1265 readbuf, writebuf, memaddr, len,
1266 xfered_len);
1267 if (res == TARGET_XFER_OK)
1268 break;
1269
1270 /* Stop if the target reports that the memory is not available. */
1271 if (res == TARGET_XFER_E_UNAVAILABLE)
1272 break;
1273
1274 /* We want to continue past core files to executables, but not
1275 past a running target's memory. */
1276 if (ops->to_has_all_memory (ops))
1277 break;
1278
1279 ops = ops->beneath;
1280 }
1281 while (ops != NULL);
1282
1283 return res;
1284 }
1285
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The request is tried against several sources in priority order:
   unmapped overlay sections, trusted read-only executable sections,
   live memory behind a traceframe, GDB's data cache, and finally the
   raw target stack.  */

static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clamp the read so it ends where the available
		     range begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1496
1497 /* Perform a partial memory transfer. For docs see target.h,
1498 to_xfer_partial. */
1499
1500 static enum target_xfer_status
1501 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1502 gdb_byte *readbuf, const gdb_byte *writebuf,
1503 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1504 {
1505 enum target_xfer_status res;
1506
1507 /* Zero length requests are ok and require no work. */
1508 if (len == 0)
1509 return TARGET_XFER_EOF;
1510
1511 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1512 breakpoint insns, thus hiding out from higher layers whether
1513 there are software breakpoints inserted in the code stream. */
1514 if (readbuf != NULL)
1515 {
1516 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1517 xfered_len);
1518
1519 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1520 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1521 }
1522 else
1523 {
1524 void *buf;
1525 struct cleanup *old_chain;
1526
1527 /* A large write request is likely to be partially satisfied
1528 by memory_xfer_partial_1. We will continually malloc
1529 and free a copy of the entire write request for breakpoint
1530 shadow handling even though we only end up writing a small
1531 subset of it. Cap writes to 4KB to mitigate this. */
1532 len = min (4096, len);
1533
1534 buf = xmalloc (len);
1535 old_chain = make_cleanup (xfree, buf);
1536 memcpy (buf, writebuf, len);
1537
1538 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1539 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1540 xfered_len);
1541
1542 do_cleanups (old_chain);
1543 }
1544
1545 return res;
1546 }
1547
1548 static void
1549 restore_show_memory_breakpoints (void *arg)
1550 {
1551 show_memory_breakpoints = (uintptr_t) arg;
1552 }
1553
1554 struct cleanup *
1555 make_show_memory_breakpoints_cleanup (int show)
1556 {
1557 int current = show_memory_breakpoints;
1558
1559 show_memory_breakpoints = show;
1560 return make_cleanup (restore_show_memory_breakpoints,
1561 (void *) (uintptr_t) current);
1562 }
1563
/* For docs see target.h, to_xfer_partial.

   Entry point for all partial transfers: routes memory objects to the
   memory-specific paths, everything else straight to OPS's method,
   and optionally dumps the transfer when "set debug target" is on.  */

enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the user's "set may-write-memory off".  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Start a new output line at each 16-byte-aligned host
		 address; with "set debug target" below 2, truncate
		 the dump after the first such line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1656
1657 /* Read LEN bytes of target memory at address MEMADDR, placing the
1658 results in GDB's memory at MYADDR. Returns either 0 for success or
1659 TARGET_XFER_E_IO if any error occurs.
1660
1661 If an error occurs, no guarantee is made about the contents of the data at
1662 MYADDR. In particular, the caller should not depend upon partial reads
1663 filling the buffer with good data. There is no way for the caller to know
1664 how much good data might have been transfered anyway. Callers that can
1665 deal with partial reads should call target_read (which will retry until
1666 it makes no progress, and then return how much was transferred). */
1667
1668 int
1669 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1670 {
1671 /* Dispatch to the topmost target, not the flattened current_target.
1672 Memory accesses check target->to_has_(all_)memory, and the
1673 flattened target doesn't inherit those. */
1674 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1675 myaddr, memaddr, len) == len)
1676 return 0;
1677 else
1678 return TARGET_XFER_E_IO;
1679 }
1680
1681 /* Like target_read_memory, but specify explicitly that this is a read
1682 from the target's raw memory. That is, this read bypasses the
1683 dcache, breakpoint shadowing, etc. */
1684
1685 int
1686 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1687 {
1688 /* See comment in target_read_memory about why the request starts at
1689 current_target.beneath. */
1690 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1691 myaddr, memaddr, len) == len)
1692 return 0;
1693 else
1694 return TARGET_XFER_E_IO;
1695 }
1696
1697 /* Like target_read_memory, but specify explicitly that this is a read from
1698 the target's stack. This may trigger different cache behavior. */
1699
1700 int
1701 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1702 {
1703 /* See comment in target_read_memory about why the request starts at
1704 current_target.beneath. */
1705 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1706 myaddr, memaddr, len) == len)
1707 return 0;
1708 else
1709 return TARGET_XFER_E_IO;
1710 }
1711
1712 /* Like target_read_memory, but specify explicitly that this is a read from
1713 the target's code. This may trigger different cache behavior. */
1714
1715 int
1716 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1717 {
1718 /* See comment in target_read_memory about why the request starts at
1719 current_target.beneath. */
1720 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1721 myaddr, memaddr, len) == len)
1722 return 0;
1723 else
1724 return TARGET_XFER_E_IO;
1725 }
1726
1727 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1728 Returns either 0 for success or TARGET_XFER_E_IO if any
1729 error occurs. If an error occurs, no guarantee is made about how
1730 much data got written. Callers that can deal with partial writes
1731 should call target_write. */
1732
1733 int
1734 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1735 {
1736 /* See comment in target_read_memory about why the request starts at
1737 current_target.beneath. */
1738 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1739 myaddr, memaddr, len) == len)
1740 return 0;
1741 else
1742 return TARGET_XFER_E_IO;
1743 }
1744
1745 /* Write LEN bytes from MYADDR to target raw memory at address
1746 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1747 if any error occurs. If an error occurs, no guarantee is made
1748 about how much data got written. Callers that can deal with
1749 partial writes should call target_write. */
1750
1751 int
1752 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1753 {
1754 /* See comment in target_read_memory about why the request starts at
1755 current_target.beneath. */
1756 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1757 myaddr, memaddr, len) == len)
1758 return 0;
1759 else
1760 return TARGET_XFER_E_IO;
1761 }
1762
/* Fetch the target's memory map.

   Finds the first target in the stack that implements to_memory_map,
   sorts the returned regions, rejects overlapping maps (returning
   NULL), and numbers each region for the "mem" commands.  The caller
   owns the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort regions by start address so the overlap check below only
     needs to compare neighbors.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1810
1811 void
1812 target_flash_erase (ULONGEST address, LONGEST length)
1813 {
1814 struct target_ops *t;
1815
1816 for (t = current_target.beneath; t != NULL; t = t->beneath)
1817 if (t->to_flash_erase != NULL)
1818 {
1819 if (targetdebug)
1820 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1821 hex_string (address), phex (length, 0));
1822 t->to_flash_erase (t, address, length);
1823 return;
1824 }
1825
1826 tcomplain ();
1827 }
1828
1829 void
1830 target_flash_done (void)
1831 {
1832 struct target_ops *t;
1833
1834 for (t = current_target.beneath; t != NULL; t = t->beneath)
1835 if (t->to_flash_done != NULL)
1836 {
1837 if (targetdebug)
1838 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1839 t->to_flash_done (t);
1840 return;
1841 }
1842
1843 tcomplain ();
1844 }
1845
/* "show" callback for the "trust-readonly-sections" setting; prints
   the current VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1854
1855 /* More generic transfers. */
1856
1857 static enum target_xfer_status
1858 default_xfer_partial (struct target_ops *ops, enum target_object object,
1859 const char *annex, gdb_byte *readbuf,
1860 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1861 ULONGEST *xfered_len)
1862 {
1863 if (object == TARGET_OBJECT_MEMORY
1864 && ops->deprecated_xfer_memory != NULL)
1865 /* If available, fall back to the target's
1866 "deprecated_xfer_memory" method. */
1867 {
1868 int xfered = -1;
1869
1870 errno = 0;
1871 if (writebuf != NULL)
1872 {
1873 void *buffer = xmalloc (len);
1874 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1875
1876 memcpy (buffer, writebuf, len);
1877 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1878 1/*write*/, NULL, ops);
1879 do_cleanups (cleanup);
1880 }
1881 if (readbuf != NULL)
1882 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1883 0/*read*/, NULL, ops);
1884 if (xfered > 0)
1885 {
1886 *xfered_len = (ULONGEST) xfered;
1887 return TARGET_XFER_E_IO;
1888 }
1889 else if (xfered == 0 && errno == 0)
1890 /* "deprecated_xfer_memory" uses 0, cross checked against
1891 ERRNO as one indication of an error. */
1892 return TARGET_XFER_EOF;
1893 else
1894 return TARGET_XFER_E_IO;
1895 }
1896 else
1897 {
1898 gdb_assert (ops->beneath != NULL);
1899 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1900 readbuf, writebuf, offset, len,
1901 xfered_len);
1902 }
1903 }
1904
1905 /* Target vector read/write partial wrapper functions. */
1906
1907 static enum target_xfer_status
1908 target_read_partial (struct target_ops *ops,
1909 enum target_object object,
1910 const char *annex, gdb_byte *buf,
1911 ULONGEST offset, ULONGEST len,
1912 ULONGEST *xfered_len)
1913 {
1914 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1915 xfered_len);
1916 }
1917
1918 static enum target_xfer_status
1919 target_write_partial (struct target_ops *ops,
1920 enum target_object object,
1921 const char *annex, const gdb_byte *buf,
1922 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1923 {
1924 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1925 xfered_len);
1926 }
1927
1928 /* Wrappers to perform the full transfer. */
1929
1930 /* For docs on target_read see target.h. */
1931
1932 LONGEST
1933 target_read (struct target_ops *ops,
1934 enum target_object object,
1935 const char *annex, gdb_byte *buf,
1936 ULONGEST offset, LONGEST len)
1937 {
1938 LONGEST xfered = 0;
1939
1940 while (xfered < len)
1941 {
1942 ULONGEST xfered_len;
1943 enum target_xfer_status status;
1944
1945 status = target_read_partial (ops, object, annex,
1946 (gdb_byte *) buf + xfered,
1947 offset + xfered, len - xfered,
1948 &xfered_len);
1949
1950 /* Call an observer, notifying them of the xfer progress? */
1951 if (status == TARGET_XFER_EOF)
1952 return xfered;
1953 else if (status == TARGET_XFER_OK)
1954 {
1955 xfered += xfered_len;
1956 QUIT;
1957 }
1958 else
1959 return -1;
1960
1961 }
1962 return len;
1963 }
1964
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  FORWARD records which end is
     readable and thus which direction the bisection below searches.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half nearest the known-readable end.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy the tail into
	 a right-sized block and release the scratch buffer.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2099
2100 void
2101 free_memory_read_result_vector (void *x)
2102 {
2103 VEC(memory_read_result_s) *v = x;
2104 memory_read_result_s *current;
2105 int ix;
2106
2107 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2108 {
2109 xfree (current->data);
2110 }
2111 VEC_free (memory_read_result_s, v);
2112 }
2113
2114 VEC(memory_read_result_s) *
2115 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2116 {
2117 VEC(memory_read_result_s) *result = 0;
2118
2119 LONGEST xfered = 0;
2120 while (xfered < len)
2121 {
2122 struct mem_region *region = lookup_mem_region (offset + xfered);
2123 LONGEST rlen;
2124
2125 /* If there is no explicit region, a fake one should be created. */
2126 gdb_assert (region);
2127
2128 if (region->hi == 0)
2129 rlen = len - xfered;
2130 else
2131 rlen = region->hi - offset;
2132
2133 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2134 {
2135 /* Cannot read this region. Note that we can end up here only
2136 if the region is explicitly marked inaccessible, or
2137 'inaccessible-by-default' is in effect. */
2138 xfered += rlen;
2139 }
2140 else
2141 {
2142 LONGEST to_read = min (len - xfered, rlen);
2143 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2144
2145 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2146 (gdb_byte *) buffer,
2147 offset + xfered, to_read);
2148 /* Call an observer, notifying them of the xfer progress? */
2149 if (xfer <= 0)
2150 {
2151 /* Got an error reading full chunk. See if maybe we can read
2152 some subrange. */
2153 xfree (buffer);
2154 read_whatever_is_readable (ops, offset + xfered,
2155 offset + xfered + to_read, &result);
2156 xfered += to_read;
2157 }
2158 else
2159 {
2160 struct memory_read_result r;
2161 r.data = buffer;
2162 r.begin = offset + xfered;
2163 r.end = r.begin + xfer;
2164 VEC_safe_push (memory_read_result_s, result, &r);
2165 xfered += xfer;
2166 }
2167 QUIT;
2168 }
2169 }
2170 return result;
2171 }
2172
2173
2174 /* An alternative to target_write with progress callbacks. */
2175
2176 LONGEST
2177 target_write_with_progress (struct target_ops *ops,
2178 enum target_object object,
2179 const char *annex, const gdb_byte *buf,
2180 ULONGEST offset, LONGEST len,
2181 void (*progress) (ULONGEST, void *), void *baton)
2182 {
2183 LONGEST xfered = 0;
2184
2185 /* Give the progress callback a chance to set up. */
2186 if (progress)
2187 (*progress) (0, baton);
2188
2189 while (xfered < len)
2190 {
2191 ULONGEST xfered_len;
2192 enum target_xfer_status status;
2193
2194 status = target_write_partial (ops, object, annex,
2195 (gdb_byte *) buf + xfered,
2196 offset + xfered, len - xfered,
2197 &xfered_len);
2198
2199 if (status == TARGET_XFER_EOF)
2200 return xfered;
2201 if (TARGET_XFER_STATUS_ERROR_P (status))
2202 return -1;
2203
2204 gdb_assert (status == TARGET_XFER_OK);
2205 if (progress)
2206 (*progress) (xfered_len, baton);
2207
2208 xfered += xfered_len;
2209 QUIT;
2210 }
2211 return len;
2212 }
2213
2214 /* For docs on target_write see target.h. */
2215
2216 LONGEST
2217 target_write (struct target_ops *ops,
2218 enum target_object object,
2219 const char *annex, const gdb_byte *buf,
2220 ULONGEST offset, LONGEST len)
2221 {
2222 return target_write_with_progress (ops, object, annex, buf, offset, len,
2223 NULL, NULL);
2224 }
2225
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* Ask for the room left in the buffer, always reserving PADDING
	 bytes at the end for the caller (e.g. a NUL terminator).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    /* Nothing was read: free the buffer and leave *BUF_P
	       untouched.  */
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  *BUF_P is left untouched.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2288
2289 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2290 the size of the transferred data. See the declaration in "target.h"
2291 function for more information about the return value. */
2292
2293 LONGEST
2294 target_read_alloc (struct target_ops *ops, enum target_object object,
2295 const char *annex, gdb_byte **buf_p)
2296 {
2297 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2298 }
2299
2300 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2301 returned as a string, allocated using xmalloc. If an error occurs
2302 or the transfer is unsupported, NULL is returned. Empty objects
2303 are returned as allocated but empty strings. A warning is issued
2304 if the result contains any embedded NUL bytes. */
2305
2306 char *
2307 target_read_stralloc (struct target_ops *ops, enum target_object object,
2308 const char *annex)
2309 {
2310 gdb_byte *buffer;
2311 char *bufstr;
2312 LONGEST i, transferred;
2313
2314 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2315 bufstr = (char *) buffer;
2316
2317 if (transferred < 0)
2318 return NULL;
2319
2320 if (transferred == 0)
2321 return xstrdup ("");
2322
2323 bufstr[transferred] = 0;
2324
2325 /* Check for embedded NUL bytes; but allow trailing NULs. */
2326 for (i = strlen (bufstr); i < transferred; i++)
2327 if (bufstr[i] != 0)
2328 {
2329 warning (_("target object %d, annex %s, "
2330 "contained unexpected null characters"),
2331 (int) object, annex ? annex : "(none)");
2332 break;
2333 }
2334
2335 return bufstr;
2336 }
2337
2338 /* Memory transfer methods. */
2339
2340 void
2341 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2342 LONGEST len)
2343 {
2344 /* This method is used to read from an alternate, non-current
2345 target. This read must bypass the overlay support (as symbols
2346 don't match this target), and GDB's internal cache (wrong cache
2347 for this target). */
2348 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2349 != len)
2350 memory_error (TARGET_XFER_E_IO, addr);
2351 }
2352
2353 ULONGEST
2354 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2355 int len, enum bfd_endian byte_order)
2356 {
2357 gdb_byte buf[sizeof (ULONGEST)];
2358
2359 gdb_assert (len <= sizeof (buf));
2360 get_target_memory (ops, addr, buf, len);
2361 return extract_unsigned_integer (buf, len, byte_order);
2362 }
2363
2364 /* See target.h. */
2365
2366 int
2367 target_insert_breakpoint (struct gdbarch *gdbarch,
2368 struct bp_target_info *bp_tgt)
2369 {
2370 if (!may_insert_breakpoints)
2371 {
2372 warning (_("May not insert breakpoints"));
2373 return 1;
2374 }
2375
2376 return current_target.to_insert_breakpoint (&current_target,
2377 gdbarch, bp_tgt);
2378 }
2379
2380 /* See target.h. */
2381
2382 int
2383 target_remove_breakpoint (struct gdbarch *gdbarch,
2384 struct bp_target_info *bp_tgt)
2385 {
2386 /* This is kind of a weird case to handle, but the permission might
2387 have been changed after breakpoints were inserted - in which case
2388 we should just take the user literally and assume that any
2389 breakpoints should be left in place. */
2390 if (!may_insert_breakpoints)
2391 {
2392 warning (_("May not remove breakpoints"));
2393 return 1;
2394 }
2395
2396 return current_target.to_remove_breakpoint (&current_target,
2397 gdbarch, bp_tgt);
2398 }
2399
2400 static void
2401 target_info (char *args, int from_tty)
2402 {
2403 struct target_ops *t;
2404 int has_all_mem = 0;
2405
2406 if (symfile_objfile != NULL)
2407 printf_unfiltered (_("Symbols from \"%s\".\n"),
2408 objfile_name (symfile_objfile));
2409
2410 for (t = target_stack; t != NULL; t = t->beneath)
2411 {
2412 if (!(*t->to_has_memory) (t))
2413 continue;
2414
2415 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2416 continue;
2417 if (has_all_mem)
2418 printf_unfiltered (_("\tWhile running this, "
2419 "GDB does not access memory from...\n"));
2420 printf_unfiltered ("%s:\n", t->to_longname);
2421 (t->to_files_info) (t);
2422 has_all_mem = (*t->to_has_all_memory) (t);
2423 }
2424 }
2425
2426 /* This function is called before any new inferior is created, e.g.
2427 by running a program, attaching, or connecting to a target.
2428 It cleans up any state from previous invocations which might
2429 change between runs. This is a subset of what target_preopen
2430 resets (things which might change between targets). */
2431
2432 void
2433 target_pre_inferior (int from_tty)
2434 {
2435 /* Clear out solib state. Otherwise the solib state of the previous
2436 inferior might have survived and is entirely wrong for the new
2437 target. This has been observed on GNU/Linux using glibc 2.3. How
2438 to reproduce:
2439
2440 bash$ ./foo&
2441 [1] 4711
2442 bash$ ./foo&
2443 [1] 4712
2444 bash$ gdb ./foo
2445 [...]
2446 (gdb) attach 4711
2447 (gdb) detach
2448 (gdb) attach 4712
2449 Cannot access memory at address 0xdeadbeef
2450 */
2451
2452 /* In some OSs, the shared library list is the same/global/shared
2453 across inferiors. If code is shared between processes, so are
2454 memory regions and features. */
2455 if (!gdbarch_has_global_solist (target_gdbarch ()))
2456 {
2457 no_shared_libraries (NULL, from_tty);
2458
2459 invalidate_target_mem_regions ();
2460
2461 target_clear_description ();
2462 }
2463
2464 agent_capability_invalidate ();
2465 }
2466
2467 /* Callback for iterate_over_inferiors. Gets rid of the given
2468 inferior. */
2469
2470 static int
2471 dispose_inferior (struct inferior *inf, void *args)
2472 {
2473 struct thread_info *thread;
2474
2475 thread = any_thread_of_process (inf->pid);
2476 if (thread)
2477 {
2478 switch_to_thread (thread->ptid);
2479
2480 /* Core inferiors actually should be detached, not killed. */
2481 if (target_has_execution)
2482 target_kill ();
2483 else
2484 target_detach (NULL, 0);
2485 }
2486
2487 return 0;
2488 }
2489
2490 /* This is to be called by the open routine before it does
2491 anything. */
2492
2493 void
2494 target_preopen (int from_tty)
2495 {
2496 dont_repeat ();
2497
2498 if (have_inferiors ())
2499 {
2500 if (!from_tty
2501 || !have_live_inferiors ()
2502 || query (_("A program is being debugged already. Kill it? ")))
2503 iterate_over_inferiors (dispose_inferior, NULL);
2504 else
2505 error (_("Program not killed."));
2506 }
2507
2508 /* Calling target_kill may remove the target from the stack. But if
2509 it doesn't (which seems like a win for UDI), remove it now. */
2510 /* Leave the exec target, though. The user may be switching from a
2511 live process to a core of the same program. */
2512 pop_all_targets_above (file_stratum);
2513
2514 target_pre_inferior (from_tty);
2515 }
2516
2517 /* Detach a target after doing deferred register stores. */
2518
2519 void
2520 target_detach (const char *args, int from_tty)
2521 {
2522 struct target_ops* t;
2523
2524 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2525 /* Don't remove global breakpoints here. They're removed on
2526 disconnection from the target. */
2527 ;
2528 else
2529 /* If we're in breakpoints-always-inserted mode, have to remove
2530 them before detaching. */
2531 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2532
2533 prepare_for_detach ();
2534
2535 current_target.to_detach (&current_target, args, from_tty);
2536 if (targetdebug)
2537 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2538 args, from_tty);
2539 }
2540
2541 void
2542 target_disconnect (char *args, int from_tty)
2543 {
2544 struct target_ops *t;
2545
2546 /* If we're in breakpoints-always-inserted mode or if breakpoints
2547 are global across processes, we have to remove them before
2548 disconnecting. */
2549 remove_breakpoints ();
2550
2551 for (t = current_target.beneath; t != NULL; t = t->beneath)
2552 if (t->to_disconnect != NULL)
2553 {
2554 if (targetdebug)
2555 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2556 args, from_tty);
2557 t->to_disconnect (t, args, from_tty);
2558 return;
2559 }
2560
2561 tcomplain ();
2562 }
2563
2564 ptid_t
2565 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2566 {
2567 struct target_ops *t;
2568 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2569 status, options);
2570
2571 if (targetdebug)
2572 {
2573 char *status_string;
2574 char *options_string;
2575
2576 status_string = target_waitstatus_to_string (status);
2577 options_string = target_options_to_string (options);
2578 fprintf_unfiltered (gdb_stdlog,
2579 "target_wait (%d, status, options={%s})"
2580 " = %d, %s\n",
2581 ptid_get_pid (ptid), options_string,
2582 ptid_get_pid (retval), status_string);
2583 xfree (status_string);
2584 xfree (options_string);
2585 }
2586
2587 return retval;
2588 }
2589
2590 char *
2591 target_pid_to_str (ptid_t ptid)
2592 {
2593 struct target_ops *t;
2594
2595 for (t = current_target.beneath; t != NULL; t = t->beneath)
2596 {
2597 if (t->to_pid_to_str != NULL)
2598 return (*t->to_pid_to_str) (t, ptid);
2599 }
2600
2601 return normal_pid_to_str (ptid);
2602 }
2603
2604 char *
2605 target_thread_name (struct thread_info *info)
2606 {
2607 return current_target.to_thread_name (&current_target, info);
2608 }
2609
2610 void
2611 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2612 {
2613 struct target_ops *t;
2614
2615 target_dcache_invalidate ();
2616
2617 current_target.to_resume (&current_target, ptid, step, signal);
2618 if (targetdebug)
2619 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2620 ptid_get_pid (ptid),
2621 step ? "step" : "continue",
2622 gdb_signal_to_name (signal));
2623
2624 registers_changed_ptid (ptid);
2625 set_executing (ptid, 1);
2626 set_running (ptid, 1);
2627 clear_inline_frame_state (ptid);
2628 }
2629
2630 void
2631 target_pass_signals (int numsigs, unsigned char *pass_signals)
2632 {
2633 struct target_ops *t;
2634
2635 for (t = current_target.beneath; t != NULL; t = t->beneath)
2636 {
2637 if (t->to_pass_signals != NULL)
2638 {
2639 if (targetdebug)
2640 {
2641 int i;
2642
2643 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2644 numsigs);
2645
2646 for (i = 0; i < numsigs; i++)
2647 if (pass_signals[i])
2648 fprintf_unfiltered (gdb_stdlog, " %s",
2649 gdb_signal_to_name (i));
2650
2651 fprintf_unfiltered (gdb_stdlog, " })\n");
2652 }
2653
2654 (*t->to_pass_signals) (t, numsigs, pass_signals);
2655 return;
2656 }
2657 }
2658 }
2659
2660 void
2661 target_program_signals (int numsigs, unsigned char *program_signals)
2662 {
2663 struct target_ops *t;
2664
2665 for (t = current_target.beneath; t != NULL; t = t->beneath)
2666 {
2667 if (t->to_program_signals != NULL)
2668 {
2669 if (targetdebug)
2670 {
2671 int i;
2672
2673 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2674 numsigs);
2675
2676 for (i = 0; i < numsigs; i++)
2677 if (program_signals[i])
2678 fprintf_unfiltered (gdb_stdlog, " %s",
2679 gdb_signal_to_name (i));
2680
2681 fprintf_unfiltered (gdb_stdlog, " })\n");
2682 }
2683
2684 (*t->to_program_signals) (t, numsigs, program_signals);
2685 return;
2686 }
2687 }
2688 }
2689
2690 /* Look through the list of possible targets for a target that can
2691 follow forks. */
2692
2693 int
2694 target_follow_fork (int follow_child, int detach_fork)
2695 {
2696 struct target_ops *t;
2697
2698 for (t = current_target.beneath; t != NULL; t = t->beneath)
2699 {
2700 if (t->to_follow_fork != NULL)
2701 {
2702 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2703
2704 if (targetdebug)
2705 fprintf_unfiltered (gdb_stdlog,
2706 "target_follow_fork (%d, %d) = %d\n",
2707 follow_child, detach_fork, retval);
2708 return retval;
2709 }
2710 }
2711
2712 /* Some target returned a fork event, but did not know how to follow it. */
2713 internal_error (__FILE__, __LINE__,
2714 _("could not find a target to follow fork"));
2715 }
2716
2717 void
2718 target_mourn_inferior (void)
2719 {
2720 struct target_ops *t;
2721
2722 for (t = current_target.beneath; t != NULL; t = t->beneath)
2723 {
2724 if (t->to_mourn_inferior != NULL)
2725 {
2726 t->to_mourn_inferior (t);
2727 if (targetdebug)
2728 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2729
2730 /* We no longer need to keep handles on any of the object files.
2731 Make sure to release them to avoid unnecessarily locking any
2732 of them while we're not actually debugging. */
2733 bfd_cache_close_all ();
2734
2735 return;
2736 }
2737 }
2738
2739 internal_error (__FILE__, __LINE__,
2740 _("could not find a target to follow mourn inferior"));
2741 }
2742
2743 /* Look for a target which can describe architectural features, starting
2744 from TARGET. If we find one, return its description. */
2745
2746 const struct target_desc *
2747 target_read_description (struct target_ops *target)
2748 {
2749 struct target_ops *t;
2750
2751 for (t = target; t != NULL; t = t->beneath)
2752 if (t->to_read_description != NULL)
2753 {
2754 const struct target_desc *tdesc;
2755
2756 tdesc = t->to_read_description (t);
2757 if (tdesc)
2758 return tdesc;
2759 }
2760
2761 return NULL;
2762 }
2763
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra PATTERN_LEN - 1 bytes let a match that straddles two
     chunks be found within a single memmem call.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2871
2872 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2873 sequence of bytes in PATTERN with length PATTERN_LEN.
2874
2875 The result is 1 if found, 0 if not found, and -1 if there was an error
2876 requiring halting of the search (e.g. memory read error).
2877 If the pattern is found the address is recorded in FOUND_ADDRP. */
2878
2879 int
2880 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2881 const gdb_byte *pattern, ULONGEST pattern_len,
2882 CORE_ADDR *found_addrp)
2883 {
2884 struct target_ops *t;
2885 int found;
2886
2887 /* We don't use INHERIT to set current_target.to_search_memory,
2888 so we have to scan the target stack and handle targetdebug
2889 ourselves. */
2890
2891 if (targetdebug)
2892 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2893 hex_string (start_addr));
2894
2895 for (t = current_target.beneath; t != NULL; t = t->beneath)
2896 if (t->to_search_memory != NULL)
2897 break;
2898
2899 if (t != NULL)
2900 {
2901 found = t->to_search_memory (t, start_addr, search_space_len,
2902 pattern, pattern_len, found_addrp);
2903 }
2904 else
2905 {
2906 /* If a special version of to_search_memory isn't available, use the
2907 simple version. */
2908 found = simple_search_memory (current_target.beneath,
2909 start_addr, search_space_len,
2910 pattern, pattern_len, found_addrp);
2911 }
2912
2913 if (targetdebug)
2914 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2915
2916 return found;
2917 }
2918
2919 /* Look through the currently pushed targets. If none of them will
2920 be able to restart the currently running process, issue an error
2921 message. */
2922
2923 void
2924 target_require_runnable (void)
2925 {
2926 struct target_ops *t;
2927
2928 for (t = target_stack; t != NULL; t = t->beneath)
2929 {
2930 /* If this target knows how to create a new program, then
2931 assume we will still be able to after killing the current
2932 one. Either killing and mourning will not pop T, or else
2933 find_default_run_target will find it again. */
2934 if (t->to_create_inferior != NULL)
2935 return;
2936
2937 /* Do not worry about thread_stratum targets that can not
2938 create inferiors. Assume they will be pushed again if
2939 necessary, and continue to the process_stratum. */
2940 if (t->to_stratum == thread_stratum
2941 || t->to_stratum == arch_stratum)
2942 continue;
2943
2944 error (_("The \"%s\" target does not support \"run\". "
2945 "Try \"help target\" or \"continue\"."),
2946 t->to_shortname);
2947 }
2948
2949 /* This function is only called if the target is running. In that
2950 case there should have been a process_stratum target and it
2951 should either know how to create inferiors, or not... */
2952 internal_error (__FILE__, __LINE__, _("No targets found"));
2953 }
2954
2955 /* Look through the list of possible targets for a target that can
2956 execute a run or attach command without any other data. This is
2957 used to locate the default process stratum.
2958
2959 If DO_MESG is not NULL, the result is always valid (error() is
2960 called for errors); else, return NULL on error. */
2961
2962 static struct target_ops *
2963 find_default_run_target (char *do_mesg)
2964 {
2965 struct target_ops **t;
2966 struct target_ops *runable = NULL;
2967 int count;
2968
2969 count = 0;
2970
2971 for (t = target_structs; t < target_structs + target_struct_size;
2972 ++t)
2973 {
2974 if ((*t)->to_can_run && target_can_run (*t))
2975 {
2976 runable = *t;
2977 ++count;
2978 }
2979 }
2980
2981 if (count != 1)
2982 {
2983 if (do_mesg)
2984 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2985 else
2986 return NULL;
2987 }
2988
2989 return runable;
2990 }
2991
2992 void
2993 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2994 {
2995 struct target_ops *t;
2996
2997 t = find_default_run_target ("attach");
2998 (t->to_attach) (t, args, from_tty);
2999 return;
3000 }
3001
3002 void
3003 find_default_create_inferior (struct target_ops *ops,
3004 char *exec_file, char *allargs, char **env,
3005 int from_tty)
3006 {
3007 struct target_ops *t;
3008
3009 t = find_default_run_target ("run");
3010 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3011 return;
3012 }
3013
3014 static int
3015 find_default_can_async_p (struct target_ops *ignore)
3016 {
3017 struct target_ops *t;
3018
3019 /* This may be called before the target is pushed on the stack;
3020 look for the default process stratum. If there's none, gdb isn't
3021 configured with a native debugger, and target remote isn't
3022 connected yet. */
3023 t = find_default_run_target (NULL);
3024 if (t && t->to_can_async_p != delegate_can_async_p)
3025 return (t->to_can_async_p) (t);
3026 return 0;
3027 }
3028
3029 static int
3030 find_default_is_async_p (struct target_ops *ignore)
3031 {
3032 struct target_ops *t;
3033
3034 /* This may be called before the target is pushed on the stack;
3035 look for the default process stratum. If there's none, gdb isn't
3036 configured with a native debugger, and target remote isn't
3037 connected yet. */
3038 t = find_default_run_target (NULL);
3039 if (t && t->to_is_async_p != delegate_is_async_p)
3040 return (t->to_is_async_p) (t);
3041 return 0;
3042 }
3043
3044 static int
3045 find_default_supports_non_stop (struct target_ops *self)
3046 {
3047 struct target_ops *t;
3048
3049 t = find_default_run_target (NULL);
3050 if (t && t->to_supports_non_stop)
3051 return (t->to_supports_non_stop) (t);
3052 return 0;
3053 }
3054
3055 int
3056 target_supports_non_stop (void)
3057 {
3058 struct target_ops *t;
3059
3060 for (t = &current_target; t != NULL; t = t->beneath)
3061 if (t->to_supports_non_stop)
3062 return t->to_supports_non_stop (t);
3063
3064 return 0;
3065 }
3066
3067 /* Implement the "info proc" command. */
3068
3069 int
3070 target_info_proc (char *args, enum info_proc_what what)
3071 {
3072 struct target_ops *t;
3073
3074 /* If we're already connected to something that can get us OS
3075 related data, use it. Otherwise, try using the native
3076 target. */
3077 if (current_target.to_stratum >= process_stratum)
3078 t = current_target.beneath;
3079 else
3080 t = find_default_run_target (NULL);
3081
3082 for (; t != NULL; t = t->beneath)
3083 {
3084 if (t->to_info_proc != NULL)
3085 {
3086 t->to_info_proc (t, args, what);
3087
3088 if (targetdebug)
3089 fprintf_unfiltered (gdb_stdlog,
3090 "target_info_proc (\"%s\", %d)\n", args, what);
3091
3092 return 1;
3093 }
3094 }
3095
3096 return 0;
3097 }
3098
3099 static int
3100 find_default_supports_disable_randomization (struct target_ops *self)
3101 {
3102 struct target_ops *t;
3103
3104 t = find_default_run_target (NULL);
3105 if (t && t->to_supports_disable_randomization)
3106 return (t->to_supports_disable_randomization) (t);
3107 return 0;
3108 }
3109
3110 int
3111 target_supports_disable_randomization (void)
3112 {
3113 struct target_ops *t;
3114
3115 for (t = &current_target; t != NULL; t = t->beneath)
3116 if (t->to_supports_disable_randomization)
3117 return t->to_supports_disable_randomization (t);
3118
3119 return 0;
3120 }
3121
3122 char *
3123 target_get_osdata (const char *type)
3124 {
3125 struct target_ops *t;
3126
3127 /* If we're already connected to something that can get us OS
3128 related data, use it. Otherwise, try using the native
3129 target. */
3130 if (current_target.to_stratum >= process_stratum)
3131 t = current_target.beneath;
3132 else
3133 t = find_default_run_target ("get OS data");
3134
3135 if (!t)
3136 return NULL;
3137
3138 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3139 }
3140
3141 /* Determine the current address space of thread PTID. */
3142
3143 struct address_space *
3144 target_thread_address_space (ptid_t ptid)
3145 {
3146 struct address_space *aspace;
3147 struct inferior *inf;
3148 struct target_ops *t;
3149
3150 for (t = current_target.beneath; t != NULL; t = t->beneath)
3151 {
3152 if (t->to_thread_address_space != NULL)
3153 {
3154 aspace = t->to_thread_address_space (t, ptid);
3155 gdb_assert (aspace);
3156
3157 if (targetdebug)
3158 fprintf_unfiltered (gdb_stdlog,
3159 "target_thread_address_space (%s) = %d\n",
3160 target_pid_to_str (ptid),
3161 address_space_num (aspace));
3162 return aspace;
3163 }
3164 }
3165
3166 /* Fall-back to the "main" address space of the inferior. */
3167 inf = find_inferior_pid (ptid_get_pid (ptid));
3168
3169 if (inf == NULL || inf->aspace == NULL)
3170 internal_error (__FILE__, __LINE__,
3171 _("Can't determine the current "
3172 "address space of thread %s\n"),
3173 target_pid_to_str (ptid));
3174
3175 return inf->aspace;
3176 }
3177
3178
3179 /* Target file operations. */
3180
3181 static struct target_ops *
3182 default_fileio_target (void)
3183 {
3184 /* If we're already connected to something that can perform
3185 file I/O, use it. Otherwise, try using the native target. */
3186 if (current_target.to_stratum >= process_stratum)
3187 return current_target.beneath;
3188 else
3189 return find_default_run_target ("file I/O");
3190 }
3191
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO to a FILEIO_* code).  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target (starting from the preferred file-I/O
     target) that implements the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3219
/* Write up to LEN bytes from WRITE_BUF to FD on the target, at file
   offset OFFSET.  Return the number of bytes written (possibly short),
   or -1 if an error occurs (and set *TARGET_ERRNO).  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3249
/* Read up to LEN bytes from FD on the target into READ_BUF, starting
   at file offset OFFSET.  Return the number of bytes read (0 at EOF),
   or -1 if an error occurs (and set *TARGET_ERRNO).  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3279
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3304
/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3329
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc (caller frees), or
   NULL if an error occurs (and set *TARGET_ERRNO).  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  /* Delegate to the first target implementing the method.  */
  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target in the stack supports file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3356
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE.  Any close error is deliberately discarded, since cleanups
   run on error paths where there is nothing useful to do with it.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int discarded_errno;

  target_fileio_close (*(int *) opaque, &discarded_errno);
}
3365
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.

   On failure returns -1 and leaves *BUF_P untouched; on success of
   zero bytes, *BUF_P is also left untouched (no buffer is handed
   out), so callers must check the return value before using it.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on any exit path, including via QUIT.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always leave PADDING spare bytes at the end of the buffer.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Hand the buffer to the caller only
	     when it actually contains data.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3429
3430 /* Read target file FILENAME. Store the result in *BUF_P and return
3431 the size of the transferred data. See the declaration in "target.h"
3432 function for more information about the return value. */
3433
3434 LONGEST
3435 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3436 {
3437 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3438 }
3439
/* Read target file FILENAME.  The result is NUL-terminated and
   returned as a string, allocated using xmalloc.  If an error occurs
   or the transfer is unsupported, NULL is returned.  Empty objects
   are returned as allocated but empty strings.  A warning is issued
   if the result contains any embedded NUL bytes.  */

char *
target_fileio_read_stralloc (const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Padding of 1 reserves space for the terminating NUL written
     below.  */
  transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  /* BUFFER was not allocated for a zero-length read; return a fresh
     empty string instead.  */
  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return bufstr;
}
3476
3477
3478 static int
3479 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3480 CORE_ADDR addr, int len)
3481 {
3482 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3483 }
3484
3485 static int
3486 default_watchpoint_addr_within_range (struct target_ops *target,
3487 CORE_ADDR addr,
3488 CORE_ADDR start, int length)
3489 {
3490 return addr >= start && addr < start + length;
3491 }
3492
/* Default implementation of to_thread_architecture: every thread uses
   the inferior's global architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3498
/* Stub that always returns zero; installed (via casts, see
   init_dummy_target) as the default for the dummy target's "has_*"
   predicate slots.  */

static int
return_zero (void)
{
  return 0;
}
3504
/* Stub that always returns a null pointer; presumably used like
   return_zero for pointer-valued target-vector slots (no use is
   visible in this part of the file).  */

static void *
return_null (void)
{
  return 0;
}
3510
/* Find the next target down the stack from the specified target T.
   Returns NULL when T is already at the bottom of the stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3520
3521 /* See target.h. */
3522
3523 struct target_ops *
3524 find_target_at (enum strata stratum)
3525 {
3526 struct target_ops *t;
3527
3528 for (t = current_target.beneath; t != NULL; t = t->beneath)
3529 if (t->to_stratum == stratum)
3530 return t;
3531
3532 return NULL;
3533 }
3534
3535 \f
/* The inferior process has died.  Long live the inferior!  Tear down
   per-inferior state; the statement order below is significant, see
   the inline comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear the global current thread before touching anything else.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give interested parties (e.g. a GUI front end) a chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3570 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer, so the result is only valid until the next call and
   the function is not reentrant.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3582
/* to_pid_to_str for the dummy target: it has no notion of threads, so
   the plain "process PID" rendering suffices.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3588
/* Error-catcher for target_find_memory_regions: always raises, so the
   return statement below is never reached (it only satisfies the
   signature).  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3597
/* Error-catcher for target_make_corefile_notes: always raises, so the
   return statement below is never reached (it only satisfies the
   signature).  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3606
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of every target stack
   and supplies "nothing there" behavior.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  /* All "has" predicates answer no; the casts adapt the argument-less
     return_zero stub to each slot's exact signature.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining method slot with a default implementation.  */
  install_dummy_methods (&dummy_target);
}
3632 \f
/* Debug wrapper for to_open: forward to the real target, then log the
   call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3640
/* Close TARG.  The target must already have been popped off the
   stack.  to_xclose takes precedence over to_close when both are
   provided; either may be absent.  */
void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3654
/* Attach to the process described by ARGS, delegating to the current
   target's to_attach method; log when target debugging is on.  */
void
target_attach (char *args, int from_tty)
{
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3663
3664 int
3665 target_thread_alive (ptid_t ptid)
3666 {
3667 struct target_ops *t;
3668
3669 for (t = current_target.beneath; t != NULL; t = t->beneath)
3670 {
3671 if (t->to_thread_alive != NULL)
3672 {
3673 int retval;
3674
3675 retval = t->to_thread_alive (t, ptid);
3676 if (targetdebug)
3677 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3678 ptid_get_pid (ptid), retval);
3679
3680 return retval;
3681 }
3682 }
3683
3684 return 0;
3685 }
3686
3687 void
3688 target_find_new_threads (void)
3689 {
3690 struct target_ops *t;
3691
3692 for (t = current_target.beneath; t != NULL; t = t->beneath)
3693 {
3694 if (t->to_find_new_threads != NULL)
3695 {
3696 t->to_find_new_threads (t);
3697 if (targetdebug)
3698 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3699
3700 return;
3701 }
3702 }
3703 }
3704
/* Stop thread PTID, unless the user has forbidden stopping the target
   (the "may_stop" setting), in which case warn and do nothing.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3716
/* Debug wrapper for to_post_attach: forward to the real target, then
   log the call.  */
static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3724
3725 /* Concatenate ELEM to LIST, a comma separate list, and return the
3726 result. The LIST incoming argument is released. */
3727
3728 static char *
3729 str_comma_list_concat_elem (char *list, const char *elem)
3730 {
3731 if (list == NULL)
3732 return xstrdup (elem);
3733 else
3734 return reconcat (list, list, ", ", elem, (char *) NULL);
3735 }
3736
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET
   and clear OPT from TARGET_OPTIONS.  Returns the new resulting
   string.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  int present = (*target_options & opt) != 0;

  if (present)
    {
      *target_options &= ~opt;
      ret = str_comma_list_concat_elem (ret, opt_str);
    }

  return ret;
}
3754
/* Return a freshly-allocated, comma-separated string naming the
   TARGET_* option bits set in TARGET_OPTIONS; the caller owns the
   result (never NULL, possibly empty).  */
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append OPT's name if set; do_option also clears the bit so that
     any leftover bits can be flagged as unknown below.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3772
/* Log register REGNO of REGCACHE to gdb_stdlog, prefixed with FUNC:
   the register's name (or number when nameless/out of range), its raw
   bytes, and -- when it fits in a LONGEST -- its value as address and
   decimal.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in order of increasing register offset.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3809
3810 void
3811 target_fetch_registers (struct regcache *regcache, int regno)
3812 {
3813 struct target_ops *t;
3814
3815 for (t = current_target.beneath; t != NULL; t = t->beneath)
3816 {
3817 if (t->to_fetch_registers != NULL)
3818 {
3819 t->to_fetch_registers (t, regcache, regno);
3820 if (targetdebug)
3821 debug_print_register ("target_fetch_registers", regcache, regno);
3822 return;
3823 }
3824 }
3825 }
3826
/* Store register REGNO from REGCACHE to the target, unless the user
   has forbidden register writes (the "may_write_registers" setting),
   in which case error out.  */
void
target_store_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_target.to_store_registers (&current_target, regcache, regno);
  if (targetdebug)
    {
      debug_print_register ("target_store_registers", regcache, regno);
    }
}
3841
3842 int
3843 target_core_of_thread (ptid_t ptid)
3844 {
3845 struct target_ops *t;
3846
3847 for (t = current_target.beneath; t != NULL; t = t->beneath)
3848 {
3849 if (t->to_core_of_thread != NULL)
3850 {
3851 int retval = t->to_core_of_thread (t, ptid);
3852
3853 if (targetdebug)
3854 fprintf_unfiltered (gdb_stdlog,
3855 "target_core_of_thread (%d) = %d\n",
3856 ptid_get_pid (ptid), retval);
3857 return retval;
3858 }
3859 }
3860
3861 return -1;
3862 }
3863
3864 int
3865 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3866 {
3867 struct target_ops *t;
3868
3869 for (t = current_target.beneath; t != NULL; t = t->beneath)
3870 {
3871 if (t->to_verify_memory != NULL)
3872 {
3873 int retval = t->to_verify_memory (t, data, memaddr, size);
3874
3875 if (targetdebug)
3876 fprintf_unfiltered (gdb_stdlog,
3877 "target_verify_memory (%s, %s) = %d\n",
3878 paddress (target_gdbarch (), memaddr),
3879 pulongest (size),
3880 retval);
3881 return retval;
3882 }
3883 }
3884
3885 tcomplain ();
3886 }
3887
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to the first target implementing
   to_insert_mask_watchpoint; returns 1 ("unsupported") when none
   does.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insert_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
3914
/* The documentation for this function is in its prototype declaration in
   target.h.  Delegates to the first target implementing
   to_remove_mask_watchpoint; returns 1 ("unsupported") when none
   does.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_remove_mask_watchpoint != NULL)
      {
	int ret;

	ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);

	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "\
target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
			      core_addr_to_string (addr),
			      core_addr_to_string (mask), rw, ret);

	return ret;
      }

  return 1;
}
3941
3942 /* The documentation for this function is in its prototype declaration
3943 in target.h. */
3944
3945 int
3946 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3947 {
3948 struct target_ops *t;
3949
3950 for (t = current_target.beneath; t != NULL; t = t->beneath)
3951 if (t->to_masked_watch_num_registers != NULL)
3952 return t->to_masked_watch_num_registers (t, addr, mask);
3953
3954 return -1;
3955 }
3956
3957 /* The documentation for this function is in its prototype declaration
3958 in target.h. */
3959
3960 int
3961 target_ranged_break_num_registers (void)
3962 {
3963 struct target_ops *t;
3964
3965 for (t = current_target.beneath; t != NULL; t = t->beneath)
3966 if (t->to_ranged_break_num_registers != NULL)
3967 return t->to_ranged_break_num_registers (t);
3968
3969 return -1;
3970 }
3971
/* See target.h.  Enable branch tracing for thread PTID via the first
   target implementing to_enable_btrace; throws (tcomplain) when no
   target supports it, so the trailing return is unreachable.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_enable_btrace != NULL)
      return t->to_enable_btrace (t, ptid);

  tcomplain ();
  return NULL;
}
3986
/* See target.h.  Disable the branch trace described by BTINFO via the
   first target implementing to_disable_btrace; throws (tcomplain)
   when no target supports it.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disable_btrace != NULL)
      {
	t->to_disable_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
4003
/* See target.h.  Tear down the branch trace described by BTINFO via
   the first target implementing to_teardown_btrace; throws
   (tcomplain) when no target supports it.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_teardown_btrace != NULL)
      {
	t->to_teardown_btrace (t, btinfo);
	return;
      }

  tcomplain ();
}
4020
/* See target.h.  Read branch trace data of kind TYPE into *BTRACE via
   the first target implementing to_read_btrace; throws (tcomplain)
   when no target supports it, so the trailing return is
   unreachable.  */

enum btrace_error
target_read_btrace (VEC (btrace_block_s) **btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_read_btrace != NULL)
      return t->to_read_btrace (t, btrace, btinfo, type);

  tcomplain ();
  return BTRACE_ERR_NOT_SUPPORTED;
}
4037
4038 /* See target.h. */
4039
4040 void
4041 target_stop_recording (void)
4042 {
4043 struct target_ops *t;
4044
4045 for (t = current_target.beneath; t != NULL; t = t->beneath)
4046 if (t->to_stop_recording != NULL)
4047 {
4048 t->to_stop_recording (t);
4049 return;
4050 }
4051
4052 /* This is optional. */
4053 }
4054
/* See target.h.  Print information about the recording session via
   the first target implementing to_info_record; throws (tcomplain)
   when no target supports it.  */

void
target_info_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_info_record != NULL)
      {
	t->to_info_record (t);
	return;
      }

  tcomplain ();
}
4071
/* See target.h.  Save the recorded execution trace to FILENAME via
   the first target implementing to_save_record; throws (tcomplain)
   when no target supports it.  */

void
target_save_record (const char *filename)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_save_record != NULL)
      {
	t->to_save_record (t, filename);
	return;
      }

  tcomplain ();
}
4088
4089 /* See target.h. */
4090
4091 int
4092 target_supports_delete_record (void)
4093 {
4094 struct target_ops *t;
4095
4096 for (t = current_target.beneath; t != NULL; t = t->beneath)
4097 if (t->to_delete_record != NULL)
4098 return 1;
4099
4100 return 0;
4101 }
4102
/* See target.h.  Delete the recorded execution trace via the first
   target implementing to_delete_record; throws (tcomplain) when no
   target supports it.  */

void
target_delete_record (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_delete_record != NULL)
      {
	t->to_delete_record (t);
	return;
      }

  tcomplain ();
}
4119
4120 /* See target.h. */
4121
4122 int
4123 target_record_is_replaying (void)
4124 {
4125 struct target_ops *t;
4126
4127 for (t = current_target.beneath; t != NULL; t = t->beneath)
4128 if (t->to_record_is_replaying != NULL)
4129 return t->to_record_is_replaying (t);
4130
4131 return 0;
4132 }
4133
/* See target.h.  Jump to the beginning of the recorded execution
   trace via the first target implementing to_goto_record_begin;
   throws (tcomplain) when no target supports it.  */

void
target_goto_record_begin (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_begin != NULL)
      {
	t->to_goto_record_begin (t);
	return;
      }

  tcomplain ();
}
4150
/* See target.h.  Jump to the end of the recorded execution trace via
   the first target implementing to_goto_record_end; throws
   (tcomplain) when no target supports it.  */

void
target_goto_record_end (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record_end != NULL)
      {
	t->to_goto_record_end (t);
	return;
      }

  tcomplain ();
}
4167
/* See target.h.  Jump to instruction number INSN in the recorded
   execution trace via the first target implementing to_goto_record;
   throws (tcomplain) when no target supports it.  */

void
target_goto_record (ULONGEST insn)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_goto_record != NULL)
      {
	t->to_goto_record (t, insn);
	return;
      }

  tcomplain ();
}
4184
/* See target.h.  Display SIZE instructions of the recorded history
   with FLAGS via the first target implementing to_insn_history;
   throws (tcomplain) when no target supports it.  */

void
target_insn_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history != NULL)
      {
	t->to_insn_history (t, size, flags);
	return;
      }

  tcomplain ();
}
4201
/* See target.h.  Display SIZE instructions of the recorded history
   starting at FROM, with FLAGS, via the first target implementing
   to_insn_history_from; throws (tcomplain) when no target supports
   it.  */

void
target_insn_history_from (ULONGEST from, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_from != NULL)
      {
	t->to_insn_history_from (t, from, size, flags);
	return;
      }

  tcomplain ();
}
4218
/* See target.h.  Display the recorded instruction history in the
   range [BEGIN, END], with FLAGS, via the first target implementing
   to_insn_history_range; throws (tcomplain) when no target supports
   it.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_insn_history_range != NULL)
      {
	t->to_insn_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4235
/* See target.h.  Display SIZE entries of the recorded call history
   with FLAGS via the first target implementing to_call_history;
   throws (tcomplain) when no target supports it.  */

void
target_call_history (int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history != NULL)
      {
	t->to_call_history (t, size, flags);
	return;
      }

  tcomplain ();
}
4252
/* See target.h.  Display SIZE entries of the recorded call history
   starting at BEGIN, with FLAGS, via the first target implementing
   to_call_history_from; throws (tcomplain) when no target supports
   it.  */

void
target_call_history_from (ULONGEST begin, int size, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_from != NULL)
      {
	t->to_call_history_from (t, begin, size, flags);
	return;
      }

  tcomplain ();
}
4269
/* See target.h.  Display the recorded call history in the range
   [BEGIN, END], with FLAGS, via the first target implementing
   to_call_history_range; throws (tcomplain) when no target supports
   it.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_call_history_range != NULL)
      {
	t->to_call_history_range (t, begin, end, flags);
	return;
      }

  tcomplain ();
}
4286
/* Debug wrapper for to_prepare_to_store: forward to the real target,
   then log the call.  */
static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4294
/* See target.h.  Return the first non-NULL to_get_unwinder field
   found in the target stack (note: the field is the unwinder object
   itself and is returned without being called), or NULL.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_unwinder != NULL)
      return t->to_get_unwinder;

  return NULL;
}
4308
/* See target.h.  Return the first non-NULL to_get_tailcall_unwinder
   field found in the target stack (the field is returned, not
   called), or NULL.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_get_tailcall_unwinder != NULL)
      return t->to_get_tailcall_unwinder;

  return NULL;
}
4322
4323 /* See target.h. */
4324
4325 CORE_ADDR
4326 forward_target_decr_pc_after_break (struct target_ops *ops,
4327 struct gdbarch *gdbarch)
4328 {
4329 for (; ops != NULL; ops = ops->beneath)
4330 if (ops->to_decr_pc_after_break != NULL)
4331 return ops->to_decr_pc_after_break (ops, gdbarch);
4332
4333 return gdbarch_decr_pc_after_break (gdbarch);
4334 }
4335
/* See target.h.  Convenience wrapper that starts the
   forward_target_decr_pc_after_break walk at the top of the current
   target stack.  */

CORE_ADDR
target_decr_pc_after_break (struct gdbarch *gdbarch)
{
  return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
}
4343
/* Debug wrapper for the deprecated_xfer_memory method: forward to the
   real target, then log the transfer and (up to one line's worth,
   unless targetdebug >= 2) the bytes moved.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* No trailing newline yet: the byte dump (and final newline) is
     appended below.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into 16-byte lines, keyed off the buffer
	     address; at targetdebug < 2 stop after the first line.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4384
/* Debug wrapper for to_files_info: forward to the real target, then
   log the call.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4392
/* Debug wrapper for to_insert_breakpoint: forward to the real target,
   then log the placed address and result.  */
static int
debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_insert_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4407
/* Debug wrapper for to_remove_breakpoint: forward to the real target,
   then log the placed address and result.  */
static int
debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
			    struct bp_target_info *bp_tgt)
{
  int retval;

  retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);

  fprintf_unfiltered (gdb_stdlog,
		      "target_remove_breakpoint (%s, xxx) = %ld\n",
		      core_addr_to_string (bp_tgt->placed_address),
		      (unsigned long) retval);
  return retval;
}
4422
/* Debug wrapper for to_can_use_hw_breakpoint: forward to the real
   target, then log arguments and result.  */
static int
debug_to_can_use_hw_breakpoint (struct target_ops *self,
				int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
						  type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
4440
4441 static int
4442 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4443 CORE_ADDR addr, int len)
4444 {
4445 CORE_ADDR retval;
4446
4447 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4448 addr, len);
4449
4450 fprintf_unfiltered (gdb_stdlog,
4451 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4452 core_addr_to_string (addr), (unsigned long) len,
4453 core_addr_to_string (retval));
4454 return retval;
4455 }
4456
/* Debug wrapper for to_can_accel_watchpoint_condition: forward to the
   real target, then log arguments and result.  */
static int
debug_to_can_accel_watchpoint_condition (struct target_ops *self,
					 CORE_ADDR addr, int len, int rw,
					 struct expression *cond)
{
  int retval;

  retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
							   addr, len,
							   rw, cond);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_accel_watchpoint_condition "
		      "(%s, %d, %d, %s) = %ld\n",
		      core_addr_to_string (addr), len, rw,
		      host_address_to_string (cond), (unsigned long) retval);
  return retval;
}
4475
/* Debug wrapper for to_stopped_by_watchpoint: forward to the real
   target, then log the result.  */
static int
debug_to_stopped_by_watchpoint (struct target_ops *ops)
{
  int retval;

  retval = debug_target.to_stopped_by_watchpoint (&debug_target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_by_watchpoint () = %ld\n",
		      (unsigned long) retval);
  return retval;
}
4488
4489 static int
4490 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4491 {
4492 int retval;
4493
4494 retval = debug_target.to_stopped_data_address (target, addr);
4495
4496 fprintf_unfiltered (gdb_stdlog,
4497 "target_stopped_data_address ([%s]) = %ld\n",
4498 core_addr_to_string (*addr),
4499 (unsigned long)retval);
4500 return retval;
4501 }
4502
4503 static int
4504 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4505 CORE_ADDR addr,
4506 CORE_ADDR start, int length)
4507 {
4508 int retval;
4509
4510 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4511 start, length);
4512
4513 fprintf_filtered (gdb_stdlog,
4514 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4515 core_addr_to_string (addr), core_addr_to_string (start),
4516 length, retval);
4517 return retval;
4518 }
4519
4520 static int
4521 debug_to_insert_hw_breakpoint (struct target_ops *self,
4522 struct gdbarch *gdbarch,
4523 struct bp_target_info *bp_tgt)
4524 {
4525 int retval;
4526
4527 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4528 gdbarch, bp_tgt);
4529
4530 fprintf_unfiltered (gdb_stdlog,
4531 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4532 core_addr_to_string (bp_tgt->placed_address),
4533 (unsigned long) retval);
4534 return retval;
4535 }
4536
4537 static int
4538 debug_to_remove_hw_breakpoint (struct target_ops *self,
4539 struct gdbarch *gdbarch,
4540 struct bp_target_info *bp_tgt)
4541 {
4542 int retval;
4543
4544 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4545 gdbarch, bp_tgt);
4546
4547 fprintf_unfiltered (gdb_stdlog,
4548 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4549 core_addr_to_string (bp_tgt->placed_address),
4550 (unsigned long) retval);
4551 return retval;
4552 }
4553
4554 static int
4555 debug_to_insert_watchpoint (struct target_ops *self,
4556 CORE_ADDR addr, int len, int type,
4557 struct expression *cond)
4558 {
4559 int retval;
4560
4561 retval = debug_target.to_insert_watchpoint (&debug_target,
4562 addr, len, type, cond);
4563
4564 fprintf_unfiltered (gdb_stdlog,
4565 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4566 core_addr_to_string (addr), len, type,
4567 host_address_to_string (cond), (unsigned long) retval);
4568 return retval;
4569 }
4570
4571 static int
4572 debug_to_remove_watchpoint (struct target_ops *self,
4573 CORE_ADDR addr, int len, int type,
4574 struct expression *cond)
4575 {
4576 int retval;
4577
4578 retval = debug_target.to_remove_watchpoint (&debug_target,
4579 addr, len, type, cond);
4580
4581 fprintf_unfiltered (gdb_stdlog,
4582 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4583 core_addr_to_string (addr), len, type,
4584 host_address_to_string (cond), (unsigned long) retval);
4585 return retval;
4586 }
4587
4588 static void
4589 debug_to_terminal_init (struct target_ops *self)
4590 {
4591 debug_target.to_terminal_init (&debug_target);
4592
4593 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4594 }
4595
4596 static void
4597 debug_to_terminal_inferior (struct target_ops *self)
4598 {
4599 debug_target.to_terminal_inferior (&debug_target);
4600
4601 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4602 }
4603
4604 static void
4605 debug_to_terminal_ours_for_output (struct target_ops *self)
4606 {
4607 debug_target.to_terminal_ours_for_output (&debug_target);
4608
4609 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4610 }
4611
4612 static void
4613 debug_to_terminal_ours (struct target_ops *self)
4614 {
4615 debug_target.to_terminal_ours (&debug_target);
4616
4617 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4618 }
4619
4620 static void
4621 debug_to_terminal_save_ours (struct target_ops *self)
4622 {
4623 debug_target.to_terminal_save_ours (&debug_target);
4624
4625 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4626 }
4627
4628 static void
4629 debug_to_terminal_info (struct target_ops *self,
4630 const char *arg, int from_tty)
4631 {
4632 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4633
4634 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4635 from_tty);
4636 }
4637
4638 static void
4639 debug_to_load (struct target_ops *self, char *args, int from_tty)
4640 {
4641 debug_target.to_load (&debug_target, args, from_tty);
4642
4643 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4644 }
4645
4646 static void
4647 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4648 {
4649 debug_target.to_post_startup_inferior (&debug_target, ptid);
4650
4651 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4652 ptid_get_pid (ptid));
4653 }
4654
4655 static int
4656 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4657 {
4658 int retval;
4659
4660 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4661
4662 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4663 pid, retval);
4664
4665 return retval;
4666 }
4667
4668 static int
4669 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4670 {
4671 int retval;
4672
4673 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4674
4675 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4676 pid, retval);
4677
4678 return retval;
4679 }
4680
4681 static int
4682 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4683 {
4684 int retval;
4685
4686 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4687
4688 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4689 pid, retval);
4690
4691 return retval;
4692 }
4693
4694 static int
4695 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4696 {
4697 int retval;
4698
4699 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4700
4701 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4702 pid, retval);
4703
4704 return retval;
4705 }
4706
4707 static int
4708 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4709 {
4710 int retval;
4711
4712 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4713
4714 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4715 pid, retval);
4716
4717 return retval;
4718 }
4719
4720 static int
4721 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4722 {
4723 int retval;
4724
4725 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4726
4727 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4728 pid, retval);
4729
4730 return retval;
4731 }
4732
4733 static int
4734 debug_to_has_exited (struct target_ops *self,
4735 int pid, int wait_status, int *exit_status)
4736 {
4737 int has_exited;
4738
4739 has_exited = debug_target.to_has_exited (&debug_target,
4740 pid, wait_status, exit_status);
4741
4742 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4743 pid, wait_status, *exit_status, has_exited);
4744
4745 return has_exited;
4746 }
4747
4748 static int
4749 debug_to_can_run (struct target_ops *self)
4750 {
4751 int retval;
4752
4753 retval = debug_target.to_can_run (&debug_target);
4754
4755 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4756
4757 return retval;
4758 }
4759
4760 static struct gdbarch *
4761 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4762 {
4763 struct gdbarch *retval;
4764
4765 retval = debug_target.to_thread_architecture (ops, ptid);
4766
4767 fprintf_unfiltered (gdb_stdlog,
4768 "target_thread_architecture (%s) = %s [%s]\n",
4769 target_pid_to_str (ptid),
4770 host_address_to_string (retval),
4771 gdbarch_bfd_arch_info (retval)->printable_name);
4772 return retval;
4773 }
4774
4775 static void
4776 debug_to_stop (struct target_ops *self, ptid_t ptid)
4777 {
4778 debug_target.to_stop (&debug_target, ptid);
4779
4780 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4781 target_pid_to_str (ptid));
4782 }
4783
4784 static void
4785 debug_to_rcmd (struct target_ops *self, char *command,
4786 struct ui_file *outbuf)
4787 {
4788 debug_target.to_rcmd (&debug_target, command, outbuf);
4789 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4790 }
4791
4792 static char *
4793 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4794 {
4795 char *exec_file;
4796
4797 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4798
4799 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4800 pid, exec_file);
4801
4802 return exec_file;
4803 }
4804
/* Install the debug_to_* logging wrappers.  First snapshot the
   current (real) target vector into DEBUG_TARGET so the wrappers can
   delegate to it, then redirect the traceable methods of
   CURRENT_TARGET at the wrappers defined above.  Called when target
   debugging is enabled.  */

static void
setup_target_debug (void)
{
  /* Save the real vector; the wrappers forward through this copy.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4852 \f
4853
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4858
/* Default implementation of to_rcmd: monitor commands only make sense
   for targets that implement them, so report an error.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4864
/* Implement the "monitor" command: pass CMD verbatim to the current
   target's rcmd method, with output going to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4871
4872 /* Print the name of each layers of our target stack. */
4873
4874 static void
4875 maintenance_print_target_stack (char *cmd, int from_tty)
4876 {
4877 struct target_ops *t;
4878
4879 printf_filtered (_("The current target stack is:\n"));
4880
4881 for (t = target_stack; t != NULL; t = t->beneath)
4882 {
4883 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4884 }
4885 }
4886
/* Controls if async mode is permitted.  This is the value actually
   consulted by the rest of GDB.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the staged
   value is copied over only when no inferior is live (see
   set_target_async_command).  */
static int target_async_permitted_1 = 0;
4893
/* Implement "set target-async".  Refuse to change the mode while any
   inferior is live; in that case revert the staged copy (which the
   set command already wrote) so "show" stays truthful, then error
   out.  Otherwise commit the staged value.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit before raising the error.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4906
/* Implement "show target-async": report the current setting VALUE.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4916
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the values are copied into the real may_* globals by
   set_target_permissions / set_write_memory_permission, or reverted
   by update_target_permissions.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4925
4926 /* Make the user-set values match the real values again. */
4927
/* Make the user-set (staged) values match the real values again,
   e.g. after a rejected change while the inferior was running.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4938
4939 /* The one function handles (most of) the permission flags in the same
4940 way. */
4941
/* Shared set hook for (most of) the "set may-*" permission commands.
   Refuses changes while the inferior is executing, reverting the
   staged values; otherwise commits them and recomputes observer mode.
   Note may_write_memory is deliberately NOT handled here — it has its
   own hook (set_write_memory_permission) so it can be changed
   independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged copies before erroring out.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4960
4961 /* Set memory write permission independently of observer mode. */
4962
/* Set hook for "set may-write-memory".  Unlike the other permissions
   (see set_target_permissions) this one may be changed independently
   of observer mode and while the inferior is running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real value match the user-changed value.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4971
4972
/* Module initializer: push the initial dummy target onto the target
   stack and register all target-related commands and set/show
   variables.  Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are synonyms.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* The set commands stage into the *_1 copies; the set hooks decide
     whether to commit them (see set_target_async_command and
     set_target_permissions above).  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.132928 seconds and 5 git commands to generate.