convert to_thread_name
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
59
60 static void tcomplain (void) ATTRIBUTE_NORETURN;
61
62 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
63
64 static int return_zero (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (struct target_ops *self);
136
137 static void debug_to_terminal_inferior (struct target_ops *self);
138
139 static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141 static void debug_to_terminal_save_ours (struct target_ops *self);
142
143 static void debug_to_terminal_ours (struct target_ops *self);
144
145 static void debug_to_load (struct target_ops *self, char *, int);
146
147 static int debug_to_can_run (struct target_ops *self);
148
149 static void debug_to_stop (struct target_ops *self, ptid_t);
150
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_allocsize;
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertent disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static unsigned int targetdebug = 0;
/* Callback for "show debug target": report the current target debug
   level (VALUE, already formatted) to FILE.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
212
213 static void setup_target_debug (void);
214
215 /* The user just typed 'target' without the name of a target. */
216
static void
target_command (char *arg, int from_tty)
{
  /* ARG and FROM_TTY are ignored; just report the usage error.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
/* Convenience wrapper: query execution capability for the currently
   selected inferior's ptid.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
343
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
346
void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): return_zero is declared as int (*) (void); the casts
     below call it through prototypes that take arguments.  This relies
     on the platform calling convention tolerating the mismatch — a
     long-standing pattern in this file, not something to imitate.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill in any remaining NULL methods with generated delegator stubs
     (see target-delegates.c), so every entry point is callable.  */
  install_delegators (t);
}
371
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
375
void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all mandatory methods before it becomes
     reachable.  */
  complete_target_initialization (t);

  /* Grow the registry on demand: start at DEFAULT_ALLOCSIZE and
     double whenever it fills up.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Lazily create the "target" prefix command the first time any
     target is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
412
413 /* Add a possible target architecture to the list. */
414
/* Add a possible target architecture to the list, with no command
   completer.  */

void
add_target (struct target_ops *t)
{
  add_target_with_completer (t, NULL);
}
420
421 /* See target.h. */
422
void
add_deprecated_target_alias (struct target_ops *t, char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
  alt = xstrprintf ("target %s", t->to_shortname);
  /* ALT is not freed here — presumably deprecate_cmd keeps a reference
     to it as the replacement text; verify before changing.  */
  deprecate_cmd (c, alt);
}
435
436 /* Stub functions */
437
/* Deliberate no-op, used as a default for optional target methods.  */

void
target_ignore (void)
{
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
/* Load a program into the inferior via the current target's to_load
   method.  */

void
target_load (char *arg, int from_tty)
{
  /* Target memory is about to change wholesale; drop cached data
     before delegating.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (&current_target, arg, from_tty);
}
467
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV, delegating to the first target on the stack that
   implements to_create_inferior.  */

void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  /* No target can create inferiors — the stack is misconfigured.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
490
/* Hand the terminal over to the inferior, unless a background resume
   should leave GDB in control of it.  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
505
/* Default memory-transfer fallback: refuse every request.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
513
/* Default for methods the current target does not support: raise an
   error naming the target.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
/* Error out because the requested operation needs a live process.
   Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
526
/* Default to_terminal_info method: there is nothing saved to report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
533 /* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops *self)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* Copy FIELD from the first (topmost) target that provides it; a
     later stratum never overrides an earlier one.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      /* Do not inherit to_post_attach.  */
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      /* Do not inherit to_prepare_to_store.  */
      INHERIT (deprecated_xfer_memory, t);
      /* Do not inherit to_files_info.  */
      /* Do not inherit to_insert_breakpoint.  */
      /* Do not inherit to_remove_breakpoint.  */
      /* Do not inherit to_can_use_hw_breakpoint.  */
      /* Do not inherit to_insert_hw_breakpoint.  */
      /* Do not inherit to_remove_hw_breakpoint.  */
      /* Do not inherit to_ranged_break_num_registers.  */
      /* Do not inherit to_insert_watchpoint.  */
      /* Do not inherit to_remove_watchpoint.  */
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      /* Do not inherit to_stopped_data_address.  */
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      /* Do not inherit to_stopped_by_watchpoint.  */
      /* Do not inherit to_watchpoint_addr_within_range.  */
      /* Do not inherit to_region_ok_for_hw_watchpoint.  */
      /* Do not inherit to_can_accel_watchpoint_condition.  */
      /* Do not inherit to_masked_watch_num_registers.  */
      /* Do not inherit to_terminal_init.  */
      /* Do not inherit to_terminal_inferior.  */
      /* Do not inherit to_terminal_ours_for_output.  */
      /* Do not inherit to_terminal_ours.  */
      /* Do not inherit to_terminal_save_ours.  */
      /* Do not inherit to_terminal_info.  */
      /* Do not inherit to_kill.  */
      /* Do not inherit to_load.  */
      /* Do not inherit to_create_inferior.  */
      /* Do not inherit to_post_startup_inferior.  */
      /* Do not inherit to_insert_fork_catchpoint.  */
      /* Do not inherit to_remove_fork_catchpoint.  */
      /* Do not inherit to_insert_vfork_catchpoint.  */
      /* Do not inherit to_remove_vfork_catchpoint.  */
      /* Do not inherit to_follow_fork.  */
      /* Do not inherit to_insert_exec_catchpoint.  */
      /* Do not inherit to_remove_exec_catchpoint.  */
      /* Do not inherit to_set_syscall_catchpoint.  */
      /* Do not inherit to_has_exited.  */
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_program_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      /* Do not inherit to_extra_thread_info.  */
      /* Do not inherit to_thread_name.  */
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      /* Do not inherit to_rcmd.  */
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      /* Do not inherit to_can_async_p.  */
      /* Do not inherit to_is_async_p.  */
      /* Do not inherit to_async.  */
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_buffer_size, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_augmented_libraries_svr4_read, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      INHERIT (to_can_run_breakpoint_commands, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that prints an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_can_run,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (struct target_ops *, ptid_t))
	    target_ignore);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (struct target_ops *, int))
	    return_null);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (struct target_ops *, long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct target_ops *, struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct target_ops *, struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct target_ops *, struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct target_ops *, struct breakpoint *,
		       struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (struct target_ops *))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (struct target_ops *,
		      enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (struct target_ops *, int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (struct target_ops *, const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct target_ops *, struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct target_ops *, struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (struct target_ops *, gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (struct target_ops *))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (struct target_ops *, int))
	    target_ignore);
  de_fault (to_set_trace_buffer_size,
	    (void (*) (struct target_ops *, LONGEST))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (struct target_ops *,
		      const char *, const char *, const char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (struct target_ops *, ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (struct target_ops *,
		      CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (struct target_ops *,
						    const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (struct target_ops *))
	    return_null);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_can_run_breakpoint_commands,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (struct target_ops *, int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_augmented_libraries_svr4_read,
	    (int (*) (struct target_ops *))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
872
873 /* Push a new target type into the stack of the existing target accessors,
874 possibly superseding some of the existing accessors.
875
876 Rather than allow an empty stack, we always have the dummy target at
877 the bottom stratum, so we can call the function vectors without
878 checking them. */
879
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR walks the
     link pointers themselves so the splice below is a simple store.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the modified stack.  */
  update_current_target ();
}
923
924 /* Remove a target_ops vector from the stack, wherever it may be.
925 Return how many times it was removed (0 or 1). */
926
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
965
/* Unpush and close every target strictly above ABOVE_STRATUM.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* unpush_target refreshes current_target, so the loop condition
	 sees the next stratum each iteration.  */
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
982
/* Pop every target down to, but not including, the dummy target.  */

void
pop_all_targets (void)
{
  pop_all_targets_above (dummy_stratum);
}
988
989 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
990
991 int
992 target_is_pushed (struct target_ops *t)
993 {
994 struct target_ops **cur;
995
996 /* Check magic number. If wrong, it probably means someone changed
997 the struct definition, but not all the places that initialize one. */
998 if (t->to_magic != OPS_MAGIC)
999 {
1000 fprintf_unfiltered (gdb_stderr,
1001 "Magic number of %s target struct wrong\n",
1002 t->to_shortname);
1003 internal_error (__FILE__, __LINE__,
1004 _("failed internal consistency check"));
1005 }
1006
1007 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1008 if (*cur == t)
1009 return 1;
1010
1011 return 0;
1012 }
1013
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Errors
   out (with a TLS-specific message where possible) if the address
   cannot be determined; propagates other exceptions unchanged.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* volatile: assigned inside TRY_CATCH, read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the lowest stratum that knows how to read thread-local
     storage.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  /* Word the message differently for shared libraries vs. the
	     main executable.  */
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; rethrow for an outer
		 handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1110
1111 const char *
1112 target_xfer_status_to_string (enum target_xfer_status err)
1113 {
1114 #define CASE(X) case X: return #X
1115 switch (err)
1116 {
1117 CASE(TARGET_XFER_E_IO);
1118 CASE(TARGET_XFER_E_UNAVAILABLE);
1119 default:
1120 return "<unknown>";
1121 }
1122 #undef CASE
1123 };
1124
1125
1126 #undef MIN
1127 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1128
1129 /* target_read_string -- read a null terminated string, up to LEN bytes,
1130 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1131 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1132 is responsible for freeing it. Return the number of bytes successfully
1133 read. */
1134
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read an aligned 4-byte word covering MEMADDR; TLEN is how many
	 of those bytes (starting at OFFSET within the word) belong to
	 the string.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  /* Grow the result buffer geometrically, preserving the data
	     already copied.  */
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out of the word, stopping at (and including) a
	 terminating NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Hand the (possibly partial) buffer to the caller, who owns and
     must free it; report any saved error code through ERRNOP.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1201
1202 struct target_section_table *
1203 target_get_section_table (struct target_ops *target)
1204 {
1205 struct target_ops *t;
1206
1207 if (targetdebug)
1208 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1209
1210 for (t = target; t != NULL; t = t->beneath)
1211 if (t->to_get_section_table != NULL)
1212 return (*t->to_get_section_table) (t);
1213
1214 return NULL;
1215 }
1216
1217 /* Find a section containing ADDR. */
1218
1219 struct target_section *
1220 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1221 {
1222 struct target_section_table *table = target_get_section_table (target);
1223 struct target_section *secp;
1224
1225 if (table == NULL)
1226 return NULL;
1227
1228 for (secp = table->sections; secp < table->sections_end; secp++)
1229 {
1230 if (addr >= secp->addr && addr < secp->endaddr)
1231 return secp;
1232 }
1233 return NULL;
1234 }
1235
1236 /* Read memory from the live target, even if currently inspecting a
1237 traceframe. The return is the same as that of target_read. */
1238
1239 static enum target_xfer_status
1240 target_read_live_memory (enum target_object object,
1241 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1242 ULONGEST *xfered_len)
1243 {
1244 enum target_xfer_status ret;
1245 struct cleanup *cleanup;
1246
1247 /* Switch momentarily out of tfind mode so to access live memory.
1248 Note that this must not clear global state, such as the frame
1249 cache, which must still remain valid for the previous traceframe.
1250 We may be _building_ the frame cache at this point. */
1251 cleanup = make_cleanup_restore_traceframe_number ();
1252 set_traceframe_number (-1);
1253
1254 ret = target_xfer_partial (current_target.beneath, object, NULL,
1255 myaddr, NULL, memaddr, len, xfered_len);
1256
1257 do_cleanups (cleanup);
1258 return ret;
1259 }
1260
1261 /* Using the set of read-only target sections of OPS, read live
1262 read-only memory. Note that the actual reads start from the
1263 top-most target again.
1264
1265 For interface/parameters/return description see target.h,
1266 to_xfer_partial. */
1267
static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls inside a known read-only
     section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find how the [MEMADDR, MEMEND) range relates to the sections,
	 and clamp the read to the containing section if the range
	 straddles a section boundary.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* MEMADDR is not inside a read-only section; nothing transferred.  */
  return TARGET_XFER_EOF;
}
1316
1317 /* Read memory from more than one valid target. A core file, for
1318 instance, could have some of memory but delegate other bits to
1319 the target below it. So, we must manually try all targets. */
1320
1321 static enum target_xfer_status
1322 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1323 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1324 ULONGEST *xfered_len)
1325 {
1326 enum target_xfer_status res;
1327
1328 do
1329 {
1330 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1331 readbuf, writebuf, memaddr, len,
1332 xfered_len);
1333 if (res == TARGET_XFER_OK)
1334 break;
1335
1336 /* Stop if the target reports that the memory is not available. */
1337 if (res == TARGET_XFER_E_UNAVAILABLE)
1338 break;
1339
1340 /* We want to continue past core files to executables, but not
1341 past a running target's memory. */
1342 if (ops->to_has_all_memory (ops))
1343 break;
1344
1345 ops = ops->beneath;
1346 }
1347 while (ops != NULL);
1348
1349 return res;
1350 }
1351
1352 /* Perform a partial memory transfer.
1353 For docs see target.h, to_xfer_partial. */
1354
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* Either nothing is available at MEMADDR, or the available
	     range starts past it -- in both cases the head of the
	     request must come from live read-only memory.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  /* Look up the current inferior, if any, so we can consult and
     maintain the data cache below.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1562
1563 /* Perform a partial memory transfer. For docs see target.h,
1564 to_xfer_partial. */
1565
1566 static enum target_xfer_status
1567 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1568 gdb_byte *readbuf, const gdb_byte *writebuf,
1569 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1570 {
1571 enum target_xfer_status res;
1572
1573 /* Zero length requests are ok and require no work. */
1574 if (len == 0)
1575 return TARGET_XFER_EOF;
1576
1577 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1578 breakpoint insns, thus hiding out from higher layers whether
1579 there are software breakpoints inserted in the code stream. */
1580 if (readbuf != NULL)
1581 {
1582 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1583 xfered_len);
1584
1585 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1586 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1587 }
1588 else
1589 {
1590 void *buf;
1591 struct cleanup *old_chain;
1592
1593 /* A large write request is likely to be partially satisfied
1594 by memory_xfer_partial_1. We will continually malloc
1595 and free a copy of the entire write request for breakpoint
1596 shadow handling even though we only end up writing a small
1597 subset of it. Cap writes to 4KB to mitigate this. */
1598 len = min (4096, len);
1599
1600 buf = xmalloc (len);
1601 old_chain = make_cleanup (xfree, buf);
1602 memcpy (buf, writebuf, len);
1603
1604 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1605 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1606 xfered_len);
1607
1608 do_cleanups (old_chain);
1609 }
1610
1611 return res;
1612 }
1613
1614 static void
1615 restore_show_memory_breakpoints (void *arg)
1616 {
1617 show_memory_breakpoints = (uintptr_t) arg;
1618 }
1619
1620 struct cleanup *
1621 make_show_memory_breakpoints_cleanup (int show)
1622 {
1623 int current = show_memory_breakpoints;
1624
1625 show_memory_breakpoints = show;
1626 return make_cleanup (restore_show_memory_breakpoints,
1627 (void *) (uintptr_t) current);
1628 }
1629
1630 /* For docs see target.h, to_xfer_partial. */
1631
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "set may-write-memory off" user setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  /* With "set debug target" on, dump the request, the result, and
     (at debug level >= 2) the transferred bytes.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      /* Break the line every 16 bytes; below debug level 2,
		 stop after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1722
1723 /* Read LEN bytes of target memory at address MEMADDR, placing the
1724 results in GDB's memory at MYADDR. Returns either 0 for success or
1725 TARGET_XFER_E_IO if any error occurs.
1726
1727 If an error occurs, no guarantee is made about the contents of the data at
1728 MYADDR. In particular, the caller should not depend upon partial reads
1729 filling the buffer with good data. There is no way for the caller to know
1730 how much good data might have been transfered anyway. Callers that can
1731 deal with partial reads should call target_read (which will retry until
1732 it makes no progress, and then return how much was transferred). */
1733
1734 int
1735 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1736 {
1737 /* Dispatch to the topmost target, not the flattened current_target.
1738 Memory accesses check target->to_has_(all_)memory, and the
1739 flattened target doesn't inherit those. */
1740 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1741 myaddr, memaddr, len) == len)
1742 return 0;
1743 else
1744 return TARGET_XFER_E_IO;
1745 }
1746
1747 /* Like target_read_memory, but specify explicitly that this is a read
1748 from the target's raw memory. That is, this read bypasses the
1749 dcache, breakpoint shadowing, etc. */
1750
1751 int
1752 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1753 {
1754 /* See comment in target_read_memory about why the request starts at
1755 current_target.beneath. */
1756 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1757 myaddr, memaddr, len) == len)
1758 return 0;
1759 else
1760 return TARGET_XFER_E_IO;
1761 }
1762
1763 /* Like target_read_memory, but specify explicitly that this is a read from
1764 the target's stack. This may trigger different cache behavior. */
1765
1766 int
1767 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1768 {
1769 /* See comment in target_read_memory about why the request starts at
1770 current_target.beneath. */
1771 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1772 myaddr, memaddr, len) == len)
1773 return 0;
1774 else
1775 return TARGET_XFER_E_IO;
1776 }
1777
1778 /* Like target_read_memory, but specify explicitly that this is a read from
1779 the target's code. This may trigger different cache behavior. */
1780
1781 int
1782 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1783 {
1784 /* See comment in target_read_memory about why the request starts at
1785 current_target.beneath. */
1786 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1787 myaddr, memaddr, len) == len)
1788 return 0;
1789 else
1790 return TARGET_XFER_E_IO;
1791 }
1792
1793 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1794 Returns either 0 for success or TARGET_XFER_E_IO if any
1795 error occurs. If an error occurs, no guarantee is made about how
1796 much data got written. Callers that can deal with partial writes
1797 should call target_write. */
1798
1799 int
1800 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1801 {
1802 /* See comment in target_read_memory about why the request starts at
1803 current_target.beneath. */
1804 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1805 myaddr, memaddr, len) == len)
1806 return 0;
1807 else
1808 return TARGET_XFER_E_IO;
1809 }
1810
1811 /* Write LEN bytes from MYADDR to target raw memory at address
1812 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1813 if any error occurs. If an error occurs, no guarantee is made
1814 about how much data got written. Callers that can deal with
1815 partial writes should call target_write. */
1816
1817 int
1818 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1819 {
1820 /* See comment in target_read_memory about why the request starts at
1821 current_target.beneath. */
1822 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1823 myaddr, memaddr, len) == len)
1824 return 0;
1825 else
1826 return TARGET_XFER_E_IO;
1827 }
1828
1829 /* Fetch the target's memory map. */
1830
1831 VEC(mem_region_s) *
1832 target_memory_map (void)
1833 {
1834 VEC(mem_region_s) *result;
1835 struct mem_region *last_one, *this_one;
1836 int ix;
1837 struct target_ops *t;
1838
1839 if (targetdebug)
1840 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1841
1842 for (t = current_target.beneath; t != NULL; t = t->beneath)
1843 if (t->to_memory_map != NULL)
1844 break;
1845
1846 if (t == NULL)
1847 return NULL;
1848
1849 result = t->to_memory_map (t);
1850 if (result == NULL)
1851 return NULL;
1852
1853 qsort (VEC_address (mem_region_s, result),
1854 VEC_length (mem_region_s, result),
1855 sizeof (struct mem_region), mem_region_cmp);
1856
1857 /* Check that regions do not overlap. Simultaneously assign
1858 a numbering for the "mem" commands to use to refer to
1859 each region. */
1860 last_one = NULL;
1861 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1862 {
1863 this_one->number = ix;
1864
1865 if (last_one && last_one->hi > this_one->lo)
1866 {
1867 warning (_("Overlapping regions in memory map: ignoring"));
1868 VEC_free (mem_region_s, result);
1869 return NULL;
1870 }
1871 last_one = this_one;
1872 }
1873
1874 return result;
1875 }
1876
1877 void
1878 target_flash_erase (ULONGEST address, LONGEST length)
1879 {
1880 struct target_ops *t;
1881
1882 for (t = current_target.beneath; t != NULL; t = t->beneath)
1883 if (t->to_flash_erase != NULL)
1884 {
1885 if (targetdebug)
1886 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1887 hex_string (address), phex (length, 0));
1888 t->to_flash_erase (t, address, length);
1889 return;
1890 }
1891
1892 tcomplain ();
1893 }
1894
1895 void
1896 target_flash_done (void)
1897 {
1898 struct target_ops *t;
1899
1900 for (t = current_target.beneath; t != NULL; t = t->beneath)
1901 if (t->to_flash_done != NULL)
1902 {
1903 if (targetdebug)
1904 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1905 t->to_flash_done (t);
1906 return;
1907 }
1908
1909 tcomplain ();
1910 }
1911
/* Implement "show trust-readonly-sections": print the current mode
   VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1920
1921 /* More generic transfers. */
1922
1923 static enum target_xfer_status
1924 default_xfer_partial (struct target_ops *ops, enum target_object object,
1925 const char *annex, gdb_byte *readbuf,
1926 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1927 ULONGEST *xfered_len)
1928 {
1929 if (object == TARGET_OBJECT_MEMORY
1930 && ops->deprecated_xfer_memory != NULL)
1931 /* If available, fall back to the target's
1932 "deprecated_xfer_memory" method. */
1933 {
1934 int xfered = -1;
1935
1936 errno = 0;
1937 if (writebuf != NULL)
1938 {
1939 void *buffer = xmalloc (len);
1940 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1941
1942 memcpy (buffer, writebuf, len);
1943 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1944 1/*write*/, NULL, ops);
1945 do_cleanups (cleanup);
1946 }
1947 if (readbuf != NULL)
1948 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1949 0/*read*/, NULL, ops);
1950 if (xfered > 0)
1951 {
1952 *xfered_len = (ULONGEST) xfered;
1953 return TARGET_XFER_E_IO;
1954 }
1955 else if (xfered == 0 && errno == 0)
1956 /* "deprecated_xfer_memory" uses 0, cross checked against
1957 ERRNO as one indication of an error. */
1958 return TARGET_XFER_EOF;
1959 else
1960 return TARGET_XFER_E_IO;
1961 }
1962 else
1963 {
1964 gdb_assert (ops->beneath != NULL);
1965 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1966 readbuf, writebuf, offset, len,
1967 xfered_len);
1968 }
1969 }
1970
1971 /* Target vector read/write partial wrapper functions. */
1972
1973 static enum target_xfer_status
1974 target_read_partial (struct target_ops *ops,
1975 enum target_object object,
1976 const char *annex, gdb_byte *buf,
1977 ULONGEST offset, ULONGEST len,
1978 ULONGEST *xfered_len)
1979 {
1980 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1981 xfered_len);
1982 }
1983
1984 static enum target_xfer_status
1985 target_write_partial (struct target_ops *ops,
1986 enum target_object object,
1987 const char *annex, const gdb_byte *buf,
1988 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1989 {
1990 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1991 xfered_len);
1992 }
1993
1994 /* Wrappers to perform the full transfer. */
1995
1996 /* For docs on target_read see target.h. */
1997
1998 LONGEST
1999 target_read (struct target_ops *ops,
2000 enum target_object object,
2001 const char *annex, gdb_byte *buf,
2002 ULONGEST offset, LONGEST len)
2003 {
2004 LONGEST xfered = 0;
2005
2006 while (xfered < len)
2007 {
2008 ULONGEST xfered_len;
2009 enum target_xfer_status status;
2010
2011 status = target_read_partial (ops, object, annex,
2012 (gdb_byte *) buf + xfered,
2013 offset + xfered, len - xfered,
2014 &xfered_len);
2015
2016 /* Call an observer, notifying them of the xfer progress? */
2017 if (status == TARGET_XFER_EOF)
2018 return xfered;
2019 else if (status == TARGET_XFER_OK)
2020 {
2021 xfered += xfered_len;
2022 QUIT;
2023 }
2024 else
2025 return -1;
2026
2027 }
2028 return len;
2029 }
2030
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor the end -- nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither end is readable; per the contract above, return
	 nothing.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* Bisect toward the readable end: the "first half" is always the
	 half adjacent to the part already read successfully.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the next
	     iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  Note that BUF
	 is handed off at its full END - BEGIN allocation; only the
	 first r.end - r.begin bytes are meaningful.  The caller owns
	 r.data (see free_memory_read_result_vector).  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into an
	 exactly-sized buffer and release the scratch one.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2165
2166 void
2167 free_memory_read_result_vector (void *x)
2168 {
2169 VEC(memory_read_result_s) *v = x;
2170 memory_read_result_s *current;
2171 int ix;
2172
2173 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2174 {
2175 xfree (current->data);
2176 }
2177 VEC_free (memory_read_result_s, v);
2178 }
2179
2180 VEC(memory_read_result_s) *
2181 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2182 {
2183 VEC(memory_read_result_s) *result = 0;
2184
2185 LONGEST xfered = 0;
2186 while (xfered < len)
2187 {
2188 struct mem_region *region = lookup_mem_region (offset + xfered);
2189 LONGEST rlen;
2190
2191 /* If there is no explicit region, a fake one should be created. */
2192 gdb_assert (region);
2193
2194 if (region->hi == 0)
2195 rlen = len - xfered;
2196 else
2197 rlen = region->hi - offset;
2198
2199 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2200 {
2201 /* Cannot read this region. Note that we can end up here only
2202 if the region is explicitly marked inaccessible, or
2203 'inaccessible-by-default' is in effect. */
2204 xfered += rlen;
2205 }
2206 else
2207 {
2208 LONGEST to_read = min (len - xfered, rlen);
2209 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2210
2211 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2212 (gdb_byte *) buffer,
2213 offset + xfered, to_read);
2214 /* Call an observer, notifying them of the xfer progress? */
2215 if (xfer <= 0)
2216 {
2217 /* Got an error reading full chunk. See if maybe we can read
2218 some subrange. */
2219 xfree (buffer);
2220 read_whatever_is_readable (ops, offset + xfered,
2221 offset + xfered + to_read, &result);
2222 xfered += to_read;
2223 }
2224 else
2225 {
2226 struct memory_read_result r;
2227 r.data = buffer;
2228 r.begin = offset + xfered;
2229 r.end = r.begin + xfer;
2230 VEC_safe_push (memory_read_result_s, result, &r);
2231 xfered += xfer;
2232 }
2233 QUIT;
2234 }
2235 }
2236 return result;
2237 }
2238
2239
2240 /* An alternative to target_write with progress callbacks. */
2241
2242 LONGEST
2243 target_write_with_progress (struct target_ops *ops,
2244 enum target_object object,
2245 const char *annex, const gdb_byte *buf,
2246 ULONGEST offset, LONGEST len,
2247 void (*progress) (ULONGEST, void *), void *baton)
2248 {
2249 LONGEST xfered = 0;
2250
2251 /* Give the progress callback a chance to set up. */
2252 if (progress)
2253 (*progress) (0, baton);
2254
2255 while (xfered < len)
2256 {
2257 ULONGEST xfered_len;
2258 enum target_xfer_status status;
2259
2260 status = target_write_partial (ops, object, annex,
2261 (gdb_byte *) buf + xfered,
2262 offset + xfered, len - xfered,
2263 &xfered_len);
2264
2265 if (status == TARGET_XFER_EOF)
2266 return xfered;
2267 if (TARGET_XFER_STATUS_ERROR_P (status))
2268 return -1;
2269
2270 gdb_assert (status == TARGET_XFER_OK);
2271 if (progress)
2272 (*progress) (xfered_len, baton);
2273
2274 xfered += xfered_len;
2275 QUIT;
2276 }
2277 return len;
2278 }
2279
2280 /* For docs on target_write see target.h. */
2281
2282 LONGEST
2283 target_write (struct target_ops *ops,
2284 enum target_object object,
2285 const char *annex, const gdb_byte *buf,
2286 ULONGEST offset, LONGEST len)
2287 {
2288 return target_write_with_progress (ops, object, annex, buf, offset, len,
2289 NULL, NULL);
2290 }
2291
2292 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2293 the size of the transferred data. PADDING additional bytes are
2294 available in *BUF_P. This is a helper function for
2295 target_read_alloc; see the declaration of that function for more
2296 information. */
2297
2298 static LONGEST
2299 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2300 const char *annex, gdb_byte **buf_p, int padding)
2301 {
2302 size_t buf_alloc, buf_pos;
2303 gdb_byte *buf;
2304
2305 /* This function does not have a length parameter; it reads the
2306 entire OBJECT). Also, it doesn't support objects fetched partly
2307 from one target and partly from another (in a different stratum,
2308 e.g. a core file and an executable). Both reasons make it
2309 unsuitable for reading memory. */
2310 gdb_assert (object != TARGET_OBJECT_MEMORY);
2311
2312 /* Start by reading up to 4K at a time. The target will throttle
2313 this number down if necessary. */
2314 buf_alloc = 4096;
2315 buf = xmalloc (buf_alloc);
2316 buf_pos = 0;
2317 while (1)
2318 {
2319 ULONGEST xfered_len;
2320 enum target_xfer_status status;
2321
2322 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2323 buf_pos, buf_alloc - buf_pos - padding,
2324 &xfered_len);
2325
2326 if (status == TARGET_XFER_EOF)
2327 {
2328 /* Read all there was. */
2329 if (buf_pos == 0)
2330 xfree (buf);
2331 else
2332 *buf_p = buf;
2333 return buf_pos;
2334 }
2335 else if (status != TARGET_XFER_OK)
2336 {
2337 /* An error occurred. */
2338 xfree (buf);
2339 return TARGET_XFER_E_IO;
2340 }
2341
2342 buf_pos += xfered_len;
2343
2344 /* If the buffer is filling up, expand it. */
2345 if (buf_alloc < buf_pos * 2)
2346 {
2347 buf_alloc *= 2;
2348 buf = xrealloc (buf, buf_alloc);
2349 }
2350
2351 QUIT;
2352 }
2353 }
2354
2355 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2356 the size of the transferred data. See the declaration in "target.h"
2357 function for more information about the return value. */
2358
2359 LONGEST
2360 target_read_alloc (struct target_ops *ops, enum target_object object,
2361 const char *annex, gdb_byte **buf_p)
2362 {
2363 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2364 }
2365
2366 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2367 returned as a string, allocated using xmalloc. If an error occurs
2368 or the transfer is unsupported, NULL is returned. Empty objects
2369 are returned as allocated but empty strings. A warning is issued
2370 if the result contains any embedded NUL bytes. */
2371
2372 char *
2373 target_read_stralloc (struct target_ops *ops, enum target_object object,
2374 const char *annex)
2375 {
2376 gdb_byte *buffer;
2377 char *bufstr;
2378 LONGEST i, transferred;
2379
2380 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2381 bufstr = (char *) buffer;
2382
2383 if (transferred < 0)
2384 return NULL;
2385
2386 if (transferred == 0)
2387 return xstrdup ("");
2388
2389 bufstr[transferred] = 0;
2390
2391 /* Check for embedded NUL bytes; but allow trailing NULs. */
2392 for (i = strlen (bufstr); i < transferred; i++)
2393 if (bufstr[i] != 0)
2394 {
2395 warning (_("target object %d, annex %s, "
2396 "contained unexpected null characters"),
2397 (int) object, annex ? annex : "(none)");
2398 break;
2399 }
2400
2401 return bufstr;
2402 }
2403
2404 /* Memory transfer methods. */
2405
2406 void
2407 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2408 LONGEST len)
2409 {
2410 /* This method is used to read from an alternate, non-current
2411 target. This read must bypass the overlay support (as symbols
2412 don't match this target), and GDB's internal cache (wrong cache
2413 for this target). */
2414 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2415 != len)
2416 memory_error (TARGET_XFER_E_IO, addr);
2417 }
2418
2419 ULONGEST
2420 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2421 int len, enum bfd_endian byte_order)
2422 {
2423 gdb_byte buf[sizeof (ULONGEST)];
2424
2425 gdb_assert (len <= sizeof (buf));
2426 get_target_memory (ops, addr, buf, len);
2427 return extract_unsigned_integer (buf, len, byte_order);
2428 }
2429
2430 /* See target.h. */
2431
2432 int
2433 target_insert_breakpoint (struct gdbarch *gdbarch,
2434 struct bp_target_info *bp_tgt)
2435 {
2436 if (!may_insert_breakpoints)
2437 {
2438 warning (_("May not insert breakpoints"));
2439 return 1;
2440 }
2441
2442 return current_target.to_insert_breakpoint (&current_target,
2443 gdbarch, bp_tgt);
2444 }
2445
2446 /* See target.h. */
2447
2448 int
2449 target_remove_breakpoint (struct gdbarch *gdbarch,
2450 struct bp_target_info *bp_tgt)
2451 {
2452 /* This is kind of a weird case to handle, but the permission might
2453 have been changed after breakpoints were inserted - in which case
2454 we should just take the user literally and assume that any
2455 breakpoints should be left in place. */
2456 if (!may_insert_breakpoints)
2457 {
2458 warning (_("May not remove breakpoints"));
2459 return 1;
2460 }
2461
2462 return current_target.to_remove_breakpoint (&current_target,
2463 gdbarch, bp_tgt);
2464 }
2465
2466 static void
2467 target_info (char *args, int from_tty)
2468 {
2469 struct target_ops *t;
2470 int has_all_mem = 0;
2471
2472 if (symfile_objfile != NULL)
2473 printf_unfiltered (_("Symbols from \"%s\".\n"),
2474 objfile_name (symfile_objfile));
2475
2476 for (t = target_stack; t != NULL; t = t->beneath)
2477 {
2478 if (!(*t->to_has_memory) (t))
2479 continue;
2480
2481 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2482 continue;
2483 if (has_all_mem)
2484 printf_unfiltered (_("\tWhile running this, "
2485 "GDB does not access memory from...\n"));
2486 printf_unfiltered ("%s:\n", t->to_longname);
2487 (t->to_files_info) (t);
2488 has_all_mem = (*t->to_has_all_memory) (t);
2489 }
2490 }
2491
2492 /* This function is called before any new inferior is created, e.g.
2493 by running a program, attaching, or connecting to a target.
2494 It cleans up any state from previous invocations which might
2495 change between runs. This is a subset of what target_preopen
2496 resets (things which might change between targets). */
2497
2498 void
2499 target_pre_inferior (int from_tty)
2500 {
2501 /* Clear out solib state. Otherwise the solib state of the previous
2502 inferior might have survived and is entirely wrong for the new
2503 target. This has been observed on GNU/Linux using glibc 2.3. How
2504 to reproduce:
2505
2506 bash$ ./foo&
2507 [1] 4711
2508 bash$ ./foo&
2509 [1] 4712
2510 bash$ gdb ./foo
2511 [...]
2512 (gdb) attach 4711
2513 (gdb) detach
2514 (gdb) attach 4712
2515 Cannot access memory at address 0xdeadbeef
2516 */
2517
2518 /* In some OSs, the shared library list is the same/global/shared
2519 across inferiors. If code is shared between processes, so are
2520 memory regions and features. */
2521 if (!gdbarch_has_global_solist (target_gdbarch ()))
2522 {
2523 no_shared_libraries (NULL, from_tty);
2524
2525 invalidate_target_mem_regions ();
2526
2527 target_clear_description ();
2528 }
2529
2530 agent_capability_invalidate ();
2531 }
2532
2533 /* Callback for iterate_over_inferiors. Gets rid of the given
2534 inferior. */
2535
2536 static int
2537 dispose_inferior (struct inferior *inf, void *args)
2538 {
2539 struct thread_info *thread;
2540
2541 thread = any_thread_of_process (inf->pid);
2542 if (thread)
2543 {
2544 switch_to_thread (thread->ptid);
2545
2546 /* Core inferiors actually should be detached, not killed. */
2547 if (target_has_execution)
2548 target_kill ();
2549 else
2550 target_detach (NULL, 0);
2551 }
2552
2553 return 0;
2554 }
2555
2556 /* This is to be called by the open routine before it does
2557 anything. */
2558
2559 void
2560 target_preopen (int from_tty)
2561 {
2562 dont_repeat ();
2563
2564 if (have_inferiors ())
2565 {
2566 if (!from_tty
2567 || !have_live_inferiors ()
2568 || query (_("A program is being debugged already. Kill it? ")))
2569 iterate_over_inferiors (dispose_inferior, NULL);
2570 else
2571 error (_("Program not killed."));
2572 }
2573
2574 /* Calling target_kill may remove the target from the stack. But if
2575 it doesn't (which seems like a win for UDI), remove it now. */
2576 /* Leave the exec target, though. The user may be switching from a
2577 live process to a core of the same program. */
2578 pop_all_targets_above (file_stratum);
2579
2580 target_pre_inferior (from_tty);
2581 }
2582
2583 /* Detach a target after doing deferred register stores. */
2584
2585 void
2586 target_detach (const char *args, int from_tty)
2587 {
2588 struct target_ops* t;
2589
2590 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2591 /* Don't remove global breakpoints here. They're removed on
2592 disconnection from the target. */
2593 ;
2594 else
2595 /* If we're in breakpoints-always-inserted mode, have to remove
2596 them before detaching. */
2597 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2598
2599 prepare_for_detach ();
2600
2601 current_target.to_detach (&current_target, args, from_tty);
2602 if (targetdebug)
2603 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2604 args, from_tty);
2605 }
2606
2607 void
2608 target_disconnect (char *args, int from_tty)
2609 {
2610 struct target_ops *t;
2611
2612 /* If we're in breakpoints-always-inserted mode or if breakpoints
2613 are global across processes, we have to remove them before
2614 disconnecting. */
2615 remove_breakpoints ();
2616
2617 for (t = current_target.beneath; t != NULL; t = t->beneath)
2618 if (t->to_disconnect != NULL)
2619 {
2620 if (targetdebug)
2621 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2622 args, from_tty);
2623 t->to_disconnect (t, args, from_tty);
2624 return;
2625 }
2626
2627 tcomplain ();
2628 }
2629
2630 ptid_t
2631 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2632 {
2633 struct target_ops *t;
2634 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2635 status, options);
2636
2637 if (targetdebug)
2638 {
2639 char *status_string;
2640 char *options_string;
2641
2642 status_string = target_waitstatus_to_string (status);
2643 options_string = target_options_to_string (options);
2644 fprintf_unfiltered (gdb_stdlog,
2645 "target_wait (%d, status, options={%s})"
2646 " = %d, %s\n",
2647 ptid_get_pid (ptid), options_string,
2648 ptid_get_pid (retval), status_string);
2649 xfree (status_string);
2650 xfree (options_string);
2651 }
2652
2653 return retval;
2654 }
2655
2656 char *
2657 target_pid_to_str (ptid_t ptid)
2658 {
2659 struct target_ops *t;
2660
2661 for (t = current_target.beneath; t != NULL; t = t->beneath)
2662 {
2663 if (t->to_pid_to_str != NULL)
2664 return (*t->to_pid_to_str) (t, ptid);
2665 }
2666
2667 return normal_pid_to_str (ptid);
2668 }
2669
2670 char *
2671 target_thread_name (struct thread_info *info)
2672 {
2673 return current_target.to_thread_name (&current_target, info);
2674 }
2675
2676 void
2677 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2678 {
2679 struct target_ops *t;
2680
2681 target_dcache_invalidate ();
2682
2683 current_target.to_resume (&current_target, ptid, step, signal);
2684 if (targetdebug)
2685 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2686 ptid_get_pid (ptid),
2687 step ? "step" : "continue",
2688 gdb_signal_to_name (signal));
2689
2690 registers_changed_ptid (ptid);
2691 set_executing (ptid, 1);
2692 set_running (ptid, 1);
2693 clear_inline_frame_state (ptid);
2694 }
2695
2696 void
2697 target_pass_signals (int numsigs, unsigned char *pass_signals)
2698 {
2699 struct target_ops *t;
2700
2701 for (t = current_target.beneath; t != NULL; t = t->beneath)
2702 {
2703 if (t->to_pass_signals != NULL)
2704 {
2705 if (targetdebug)
2706 {
2707 int i;
2708
2709 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2710 numsigs);
2711
2712 for (i = 0; i < numsigs; i++)
2713 if (pass_signals[i])
2714 fprintf_unfiltered (gdb_stdlog, " %s",
2715 gdb_signal_to_name (i));
2716
2717 fprintf_unfiltered (gdb_stdlog, " })\n");
2718 }
2719
2720 (*t->to_pass_signals) (t, numsigs, pass_signals);
2721 return;
2722 }
2723 }
2724 }
2725
2726 void
2727 target_program_signals (int numsigs, unsigned char *program_signals)
2728 {
2729 struct target_ops *t;
2730
2731 for (t = current_target.beneath; t != NULL; t = t->beneath)
2732 {
2733 if (t->to_program_signals != NULL)
2734 {
2735 if (targetdebug)
2736 {
2737 int i;
2738
2739 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2740 numsigs);
2741
2742 for (i = 0; i < numsigs; i++)
2743 if (program_signals[i])
2744 fprintf_unfiltered (gdb_stdlog, " %s",
2745 gdb_signal_to_name (i));
2746
2747 fprintf_unfiltered (gdb_stdlog, " })\n");
2748 }
2749
2750 (*t->to_program_signals) (t, numsigs, program_signals);
2751 return;
2752 }
2753 }
2754 }
2755
2756 /* Look through the list of possible targets for a target that can
2757 follow forks. */
2758
2759 int
2760 target_follow_fork (int follow_child, int detach_fork)
2761 {
2762 struct target_ops *t;
2763
2764 for (t = current_target.beneath; t != NULL; t = t->beneath)
2765 {
2766 if (t->to_follow_fork != NULL)
2767 {
2768 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2769
2770 if (targetdebug)
2771 fprintf_unfiltered (gdb_stdlog,
2772 "target_follow_fork (%d, %d) = %d\n",
2773 follow_child, detach_fork, retval);
2774 return retval;
2775 }
2776 }
2777
2778 /* Some target returned a fork event, but did not know how to follow it. */
2779 internal_error (__FILE__, __LINE__,
2780 _("could not find a target to follow fork"));
2781 }
2782
2783 void
2784 target_mourn_inferior (void)
2785 {
2786 struct target_ops *t;
2787
2788 for (t = current_target.beneath; t != NULL; t = t->beneath)
2789 {
2790 if (t->to_mourn_inferior != NULL)
2791 {
2792 t->to_mourn_inferior (t);
2793 if (targetdebug)
2794 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2795
2796 /* We no longer need to keep handles on any of the object files.
2797 Make sure to release them to avoid unnecessarily locking any
2798 of them while we're not actually debugging. */
2799 bfd_cache_close_all ();
2800
2801 return;
2802 }
2803 }
2804
2805 internal_error (__FILE__, __LINE__,
2806 _("could not find a target to follow mourn inferior"));
2807 }
2808
2809 /* Look for a target which can describe architectural features, starting
2810 from TARGET. If we find one, return its description. */
2811
2812 const struct target_desc *
2813 target_read_description (struct target_ops *target)
2814 {
2815 struct target_ops *t;
2816
2817 for (t = target; t != NULL; t = t->beneath)
2818 if (t->to_read_description != NULL)
2819 {
2820 const struct target_desc *tdesc;
2821
2822 tdesc = t->to_read_description (t);
2823 if (tdesc)
2824 return tdesc;
2825 }
2826
2827 return NULL;
2828 }
2829
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).  Returns 1 if the
   pattern was found (address in *FOUND_ADDRP), 0 if not found, -1 on
   a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The buffer is one chunk plus PATTERN_LEN - 1 overlap bytes, so a
     match straddling a chunk boundary is still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* NOTE(review): plain malloc with an explicit NULL check here rather
     than the usual xmalloc -- presumably so an allocation failure
     raises a GDB error instead of aborting; confirm before changing.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      /* Bytes of the buffer that are actually valid this iteration
	 (the final chunk may be short).  */
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* START_ADDR always corresponds to search_buf[0].  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  /* The next unread byte: one full buffer past START_ADDR.  */
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2937
2938 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2939 sequence of bytes in PATTERN with length PATTERN_LEN.
2940
2941 The result is 1 if found, 0 if not found, and -1 if there was an error
2942 requiring halting of the search (e.g. memory read error).
2943 If the pattern is found the address is recorded in FOUND_ADDRP. */
2944
2945 int
2946 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2947 const gdb_byte *pattern, ULONGEST pattern_len,
2948 CORE_ADDR *found_addrp)
2949 {
2950 struct target_ops *t;
2951 int found;
2952
2953 /* We don't use INHERIT to set current_target.to_search_memory,
2954 so we have to scan the target stack and handle targetdebug
2955 ourselves. */
2956
2957 if (targetdebug)
2958 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2959 hex_string (start_addr));
2960
2961 for (t = current_target.beneath; t != NULL; t = t->beneath)
2962 if (t->to_search_memory != NULL)
2963 break;
2964
2965 if (t != NULL)
2966 {
2967 found = t->to_search_memory (t, start_addr, search_space_len,
2968 pattern, pattern_len, found_addrp);
2969 }
2970 else
2971 {
2972 /* If a special version of to_search_memory isn't available, use the
2973 simple version. */
2974 found = simple_search_memory (current_target.beneath,
2975 start_addr, search_space_len,
2976 pattern, pattern_len, found_addrp);
2977 }
2978
2979 if (targetdebug)
2980 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2981
2982 return found;
2983 }
2984
2985 /* Look through the currently pushed targets. If none of them will
2986 be able to restart the currently running process, issue an error
2987 message. */
2988
2989 void
2990 target_require_runnable (void)
2991 {
2992 struct target_ops *t;
2993
2994 for (t = target_stack; t != NULL; t = t->beneath)
2995 {
2996 /* If this target knows how to create a new program, then
2997 assume we will still be able to after killing the current
2998 one. Either killing and mourning will not pop T, or else
2999 find_default_run_target will find it again. */
3000 if (t->to_create_inferior != NULL)
3001 return;
3002
3003 /* Do not worry about thread_stratum targets that can not
3004 create inferiors. Assume they will be pushed again if
3005 necessary, and continue to the process_stratum. */
3006 if (t->to_stratum == thread_stratum
3007 || t->to_stratum == arch_stratum)
3008 continue;
3009
3010 error (_("The \"%s\" target does not support \"run\". "
3011 "Try \"help target\" or \"continue\"."),
3012 t->to_shortname);
3013 }
3014
3015 /* This function is only called if the target is running. In that
3016 case there should have been a process_stratum target and it
3017 should either know how to create inferiors, or not... */
3018 internal_error (__FILE__, __LINE__, _("No targets found"));
3019 }
3020
3021 /* Look through the list of possible targets for a target that can
3022 execute a run or attach command without any other data. This is
3023 used to locate the default process stratum.
3024
3025 If DO_MESG is not NULL, the result is always valid (error() is
3026 called for errors); else, return NULL on error. */
3027
3028 static struct target_ops *
3029 find_default_run_target (char *do_mesg)
3030 {
3031 struct target_ops **t;
3032 struct target_ops *runable = NULL;
3033 int count;
3034
3035 count = 0;
3036
3037 for (t = target_structs; t < target_structs + target_struct_size;
3038 ++t)
3039 {
3040 if ((*t)->to_can_run && target_can_run (*t))
3041 {
3042 runable = *t;
3043 ++count;
3044 }
3045 }
3046
3047 if (count != 1)
3048 {
3049 if (do_mesg)
3050 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3051 else
3052 return NULL;
3053 }
3054
3055 return runable;
3056 }
3057
3058 void
3059 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3060 {
3061 struct target_ops *t;
3062
3063 t = find_default_run_target ("attach");
3064 (t->to_attach) (t, args, from_tty);
3065 return;
3066 }
3067
3068 void
3069 find_default_create_inferior (struct target_ops *ops,
3070 char *exec_file, char *allargs, char **env,
3071 int from_tty)
3072 {
3073 struct target_ops *t;
3074
3075 t = find_default_run_target ("run");
3076 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3077 return;
3078 }
3079
3080 static int
3081 find_default_can_async_p (struct target_ops *ignore)
3082 {
3083 struct target_ops *t;
3084
3085 /* This may be called before the target is pushed on the stack;
3086 look for the default process stratum. If there's none, gdb isn't
3087 configured with a native debugger, and target remote isn't
3088 connected yet. */
3089 t = find_default_run_target (NULL);
3090 if (t && t->to_can_async_p != delegate_can_async_p)
3091 return (t->to_can_async_p) (t);
3092 return 0;
3093 }
3094
3095 static int
3096 find_default_is_async_p (struct target_ops *ignore)
3097 {
3098 struct target_ops *t;
3099
3100 /* This may be called before the target is pushed on the stack;
3101 look for the default process stratum. If there's none, gdb isn't
3102 configured with a native debugger, and target remote isn't
3103 connected yet. */
3104 t = find_default_run_target (NULL);
3105 if (t && t->to_is_async_p != delegate_is_async_p)
3106 return (t->to_is_async_p) (t);
3107 return 0;
3108 }
3109
3110 static int
3111 find_default_supports_non_stop (struct target_ops *self)
3112 {
3113 struct target_ops *t;
3114
3115 t = find_default_run_target (NULL);
3116 if (t && t->to_supports_non_stop)
3117 return (t->to_supports_non_stop) (t);
3118 return 0;
3119 }
3120
3121 int
3122 target_supports_non_stop (void)
3123 {
3124 struct target_ops *t;
3125
3126 for (t = &current_target; t != NULL; t = t->beneath)
3127 if (t->to_supports_non_stop)
3128 return t->to_supports_non_stop (t);
3129
3130 return 0;
3131 }
3132
3133 /* Implement the "info proc" command. */
3134
3135 int
3136 target_info_proc (char *args, enum info_proc_what what)
3137 {
3138 struct target_ops *t;
3139
3140 /* If we're already connected to something that can get us OS
3141 related data, use it. Otherwise, try using the native
3142 target. */
3143 if (current_target.to_stratum >= process_stratum)
3144 t = current_target.beneath;
3145 else
3146 t = find_default_run_target (NULL);
3147
3148 for (; t != NULL; t = t->beneath)
3149 {
3150 if (t->to_info_proc != NULL)
3151 {
3152 t->to_info_proc (t, args, what);
3153
3154 if (targetdebug)
3155 fprintf_unfiltered (gdb_stdlog,
3156 "target_info_proc (\"%s\", %d)\n", args, what);
3157
3158 return 1;
3159 }
3160 }
3161
3162 return 0;
3163 }
3164
3165 static int
3166 find_default_supports_disable_randomization (struct target_ops *self)
3167 {
3168 struct target_ops *t;
3169
3170 t = find_default_run_target (NULL);
3171 if (t && t->to_supports_disable_randomization)
3172 return (t->to_supports_disable_randomization) (t);
3173 return 0;
3174 }
3175
3176 int
3177 target_supports_disable_randomization (void)
3178 {
3179 struct target_ops *t;
3180
3181 for (t = &current_target; t != NULL; t = t->beneath)
3182 if (t->to_supports_disable_randomization)
3183 return t->to_supports_disable_randomization (t);
3184
3185 return 0;
3186 }
3187
3188 char *
3189 target_get_osdata (const char *type)
3190 {
3191 struct target_ops *t;
3192
3193 /* If we're already connected to something that can get us OS
3194 related data, use it. Otherwise, try using the native
3195 target. */
3196 if (current_target.to_stratum >= process_stratum)
3197 t = current_target.beneath;
3198 else
3199 t = find_default_run_target ("get OS data");
3200
3201 if (!t)
3202 return NULL;
3203
3204 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3205 }
3206
3207 /* Determine the current address space of thread PTID. */
3208
struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* First give every target in the stack a chance to report a
     per-thread address space.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  /* A target that implements the method must never return
	     NULL.  */
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  /* If even that fails, there is nothing sensible to return.  */
  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3243
3244
3245 /* Target file operations. */
3246
3247 static struct target_ops *
3248 default_fileio_target (void)
3249 {
3250 /* If we're already connected to something that can perform
3251 file I/O, use it. Otherwise, try using the native target. */
3252 if (current_target.to_stratum >= process_stratum)
3253 return current_target.beneath;
3254 else
3255 return find_default_run_target ("file I/O");
3256 }
3257
3258 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3259 target file descriptor, or -1 if an error occurs (and set
3260 *TARGET_ERRNO). */
3261 int
3262 target_fileio_open (const char *filename, int flags, int mode,
3263 int *target_errno)
3264 {
3265 struct target_ops *t;
3266
3267 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3268 {
3269 if (t->to_fileio_open != NULL)
3270 {
3271 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
3272
3273 if (targetdebug)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3276 filename, flags, mode,
3277 fd, fd != -1 ? 0 : *target_errno);
3278 return fd;
3279 }
3280 }
3281
3282 *target_errno = FILEIO_ENOSYS;
3283 return -1;
3284 }
3285
3286 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3287 Return the number of bytes written, or -1 if an error occurs
3288 (and set *TARGET_ERRNO). */
3289 int
3290 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3291 ULONGEST offset, int *target_errno)
3292 {
3293 struct target_ops *t;
3294
3295 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3296 {
3297 if (t->to_fileio_pwrite != NULL)
3298 {
3299 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
3300 target_errno);
3301
3302 if (targetdebug)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "target_fileio_pwrite (%d,...,%d,%s) "
3305 "= %d (%d)\n",
3306 fd, len, pulongest (offset),
3307 ret, ret != -1 ? 0 : *target_errno);
3308 return ret;
3309 }
3310 }
3311
3312 *target_errno = FILEIO_ENOSYS;
3313 return -1;
3314 }
3315
3316 /* Read up to LEN bytes FD on the target into READ_BUF.
3317 Return the number of bytes read, or -1 if an error occurs
3318 (and set *TARGET_ERRNO). */
3319 int
3320 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3321 ULONGEST offset, int *target_errno)
3322 {
3323 struct target_ops *t;
3324
3325 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3326 {
3327 if (t->to_fileio_pread != NULL)
3328 {
3329 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
3330 target_errno);
3331
3332 if (targetdebug)
3333 fprintf_unfiltered (gdb_stdlog,
3334 "target_fileio_pread (%d,...,%d,%s) "
3335 "= %d (%d)\n",
3336 fd, len, pulongest (offset),
3337 ret, ret != -1 ? 0 : *target_errno);
3338 return ret;
3339 }
3340 }
3341
3342 *target_errno = FILEIO_ENOSYS;
3343 return -1;
3344 }
3345
3346 /* Close FD on the target. Return 0, or -1 if an error occurs
3347 (and set *TARGET_ERRNO). */
3348 int
3349 target_fileio_close (int fd, int *target_errno)
3350 {
3351 struct target_ops *t;
3352
3353 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3354 {
3355 if (t->to_fileio_close != NULL)
3356 {
3357 int ret = t->to_fileio_close (t, fd, target_errno);
3358
3359 if (targetdebug)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "target_fileio_close (%d) = %d (%d)\n",
3362 fd, ret, ret != -1 ? 0 : *target_errno);
3363 return ret;
3364 }
3365 }
3366
3367 *target_errno = FILEIO_ENOSYS;
3368 return -1;
3369 }
3370
3371 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3372 occurs (and set *TARGET_ERRNO). */
3373 int
3374 target_fileio_unlink (const char *filename, int *target_errno)
3375 {
3376 struct target_ops *t;
3377
3378 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3379 {
3380 if (t->to_fileio_unlink != NULL)
3381 {
3382 int ret = t->to_fileio_unlink (t, filename, target_errno);
3383
3384 if (targetdebug)
3385 fprintf_unfiltered (gdb_stdlog,
3386 "target_fileio_unlink (%s) = %d (%d)\n",
3387 filename, ret, ret != -1 ? 0 : *target_errno);
3388 return ret;
3389 }
3390 }
3391
3392 *target_errno = FILEIO_ENOSYS;
3393 return -1;
3394 }
3395
3396 /* Read value of symbolic link FILENAME on the target. Return a
3397 null-terminated string allocated via xmalloc, or NULL if an error
3398 occurs (and set *TARGET_ERRNO). */
3399 char *
3400 target_fileio_readlink (const char *filename, int *target_errno)
3401 {
3402 struct target_ops *t;
3403
3404 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3405 {
3406 if (t->to_fileio_readlink != NULL)
3407 {
3408 char *ret = t->to_fileio_readlink (t, filename, target_errno);
3409
3410 if (targetdebug)
3411 fprintf_unfiltered (gdb_stdlog,
3412 "target_fileio_readlink (%s) = %s (%d)\n",
3413 filename, ret? ret : "(nil)",
3414 ret? 0 : *target_errno);
3415 return ret;
3416 }
3417 }
3418
3419 *target_errno = FILEIO_ENOSYS;
3420 return NULL;
3421 }
3422
3423 static void
3424 target_fileio_close_cleanup (void *opaque)
3425 {
3426 int fd = *(int *) opaque;
3427 int target_errno;
3428
3429 target_fileio_close (fd, &target_errno);
3430 }
3431
3432 /* Read target file FILENAME. Store the result in *BUF_P and
3433 return the size of the transferred data. PADDING additional bytes are
3434 available in *BUF_P. This is a helper function for
3435 target_fileio_read_alloc; see the declaration of that function for more
3436 information. */
3437
static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Guarantee the descriptor is closed on every exit path.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Reserve PADDING bytes at the end of the buffer; the caller
	 (e.g. target_fileio_read_stralloc) may use them.  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Note that for an empty file BUF is
	     freed and *BUF_P is left unset; callers must check the
	     return value before using *BUF_P.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      /* Allow the user to interrupt a long read.  */
      QUIT;
    }
}
3495
3496 /* Read target file FILENAME. Store the result in *BUF_P and return
3497 the size of the transferred data. See the declaration in "target.h"
3498 function for more information about the return value. */
3499
LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Plain binary read; no padding bytes are needed.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3505
3506 /* Read target file FILENAME. The result is NUL-terminated and
3507 returned as a string, allocated using xmalloc. If an error occurs
3508 or the transfer is unsupported, NULL is returned. Empty objects
3509 are returned as allocated but empty strings. A warning is issued
3510 if the result contains any embedded NUL bytes. */
3511
3512 char *
3513 target_fileio_read_stralloc (const char *filename)
3514 {
3515 gdb_byte *buffer;
3516 char *bufstr;
3517 LONGEST i, transferred;
3518
3519 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3520 bufstr = (char *) buffer;
3521
3522 if (transferred < 0)
3523 return NULL;
3524
3525 if (transferred == 0)
3526 return xstrdup ("");
3527
3528 bufstr[transferred] = 0;
3529
3530 /* Check for embedded NUL bytes; but allow trailing NULs. */
3531 for (i = strlen (bufstr); i < transferred; i++)
3532 if (bufstr[i] != 0)
3533 {
3534 warning (_("target file %s "
3535 "contained unexpected null characters"),
3536 filename);
3537 break;
3538 }
3539
3540 return bufstr;
3541 }
3542
3543
3544 static int
3545 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3546 CORE_ADDR addr, int len)
3547 {
3548 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3549 }
3550
3551 static int
3552 default_watchpoint_addr_within_range (struct target_ops *target,
3553 CORE_ADDR addr,
3554 CORE_ADDR start, int length)
3555 {
3556 return addr >= start && addr < start + length;
3557 }
3558
3559 static struct gdbarch *
3560 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3561 {
3562 return target_gdbarch ();
3563 }
3564
3565 static int
3566 return_zero (void)
3567 {
3568 return 0;
3569 }
3570
3571 static int
3572 return_minus_one (void)
3573 {
3574 return -1;
3575 }
3576
3577 static void *
3578 return_null (void)
3579 {
3580 return 0;
3581 }
3582
3583 /*
3584 * Find the next target down the stack from the specified target.
3585 */
3586
3587 struct target_ops *
3588 find_target_beneath (struct target_ops *t)
3589 {
3590 return t->beneath;
3591 }
3592
3593 /* See target.h. */
3594
3595 struct target_ops *
3596 find_target_at (enum strata stratum)
3597 {
3598 struct target_ops *t;
3599
3600 for (t = current_target.beneath; t != NULL; t = t->beneath)
3601 if (t->to_stratum == stratum)
3602 return t;
3603
3604 return NULL;
3605 }
3606
3607 \f
3608 /* The inferior process has died. Long live the inferior! */
3609
void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid before tearing anything down, so
     the rest of GDB no longer considers the inferior current.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  /* Discard cached register contents; they belonged to the dead
     process.  */
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  /* Give user code a final chance to react.  */
  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3642 \f
3643 /* Convert a normal process ID to a string. Returns the string in a
3644 static buffer. */
3645
char *
normal_pid_to_str (ptid_t ptid)
{
  /* Result lives in a static buffer, valid only until the next
     call.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}

/* Dummy-target implementation of to_pid_to_str: just use the plain
   "process NNN" form.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3660
3661 /* Error-catcher for target_find_memory_regions. */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* NOTE: presumably not reached (error throws); the return only
     placates the compiler.  */
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}

/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
{
  /* tcomplain does not return; the return value placates the
     compiler.  */
  tcomplain ();
  return NULL;
}

/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3693
3694 /* Set up the handful of non-empty slots needed by the dummy target
3695 vector. */
3696
static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  /* The dummy target sits at the very bottom of the stack.  */
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  /* NOTE(review): return_zero is cast to function-pointer types with
     different signatures and called through them; this is technically
     undefined behavior, though it works on the supported hosts.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3723 \f
3724 static void
3725 debug_to_open (char *args, int from_tty)
3726 {
3727 debug_target.to_open (args, from_tty);
3728
3729 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3730 }
3731
void
target_close (struct target_ops *targ)
{
  /* A target must be unpushed before it can be closed.  */
  gdb_assert (!target_is_pushed (targ));

  /* Prefer the extended close hook when present; otherwise fall back
     to the plain close hook.  Either may be absent.  */
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ);
  else if (targ->to_close != NULL)
    targ->to_close (targ);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3745
void
target_attach (char *args, int from_tty)
{
  /* Dispatch straight through the current target vector.  */
  current_target.to_attach (&current_target, args, from_tty);
  if (targetdebug)
    /* NOTE(review): ARGS is printed with %s; looks like callers always
       pass a non-NULL string here — confirm against attach_command.  */
    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
			args, from_tty);
}
3754
3755 int
3756 target_thread_alive (ptid_t ptid)
3757 {
3758 struct target_ops *t;
3759
3760 for (t = current_target.beneath; t != NULL; t = t->beneath)
3761 {
3762 if (t->to_thread_alive != NULL)
3763 {
3764 int retval;
3765
3766 retval = t->to_thread_alive (t, ptid);
3767 if (targetdebug)
3768 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3769 ptid_get_pid (ptid), retval);
3770
3771 return retval;
3772 }
3773 }
3774
3775 return 0;
3776 }
3777
3778 void
3779 target_find_new_threads (void)
3780 {
3781 struct target_ops *t;
3782
3783 for (t = current_target.beneath; t != NULL; t = t->beneath)
3784 {
3785 if (t->to_find_new_threads != NULL)
3786 {
3787 t->to_find_new_threads (t);
3788 if (targetdebug)
3789 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3790
3791 return;
3792 }
3793 }
3794 }
3795
void
target_stop (ptid_t ptid)
{
  /* Honor the global "may-stop" setting: silently ignore stop
     requests when the user disabled them.  */
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (&current_target, ptid);
}
3807
/* Debug wrapper for to_post_attach: forward the call, then log it.  */

static void
debug_to_post_attach (struct target_ops *self, int pid)
{
  debug_target.to_post_attach (&debug_target, pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3815
3816 /* Concatenate ELEM to LIST, a comma separate list, and return the
3817 result. The LIST incoming argument is released. */
3818
3819 static char *
3820 str_comma_list_concat_elem (char *list, const char *elem)
3821 {
3822 if (list == NULL)
3823 return xstrdup (elem);
3824 else
3825 return reconcat (list, list, ", ", elem, (char *) NULL);
3826 }
3827
3828 /* Helper for target_options_to_string. If OPT is present in
3829 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3830 Returns the new resulting string. OPT is removed from
3831 TARGET_OPTIONS. */
3832
3833 static char *
3834 do_option (int *target_options, char *ret,
3835 int opt, char *opt_str)
3836 {
3837 if ((*target_options & opt) != 0)
3838 {
3839 ret = str_comma_list_concat_elem (ret, opt_str);
3840 *target_options &= ~opt;
3841 }
3842
3843 return ret;
3844 }
3845
char *
target_options_to_string (int target_options)
{
  char *ret = NULL;

  /* Append the stringified option name and clear its bit, so any
     leftover bits afterwards are unrecognized options.  */
#define DO_TARG_OPTION(OPT) \
  ret = do_option (&target_options, ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Anything still set was not handled above.  */
  if (target_options != 0)
    ret = str_comma_list_concat_elem (ret, "unknown???");

  /* Always return an xmalloc'd string, even when empty.  */
  if (ret == NULL)
    ret = xstrdup ("");
  return ret;
}
3863
/* Print a debug trace line for FUNC showing REGNO's name (or number)
   and, for raw registers, its bytes plus a decoded value.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw registers have contents we can dump here.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      /* Raw bytes, in target order.  */
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* If it fits in a LONGEST, also show it decoded.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3900
3901 void
3902 target_fetch_registers (struct regcache *regcache, int regno)
3903 {
3904 struct target_ops *t;
3905
3906 for (t = current_target.beneath; t != NULL; t = t->beneath)
3907 {
3908 if (t->to_fetch_registers != NULL)
3909 {
3910 t->to_fetch_registers (t, regcache, regno);
3911 if (targetdebug)
3912 debug_print_register ("target_fetch_registers", regcache, regno);
3913 return;
3914 }
3915 }
3916 }
3917
3918 void
3919 target_store_registers (struct regcache *regcache, int regno)
3920 {
3921 struct target_ops *t;
3922
3923 if (!may_write_registers)
3924 error (_("Writing to registers is not allowed (regno %d)"), regno);
3925
3926 current_target.to_store_registers (&current_target, regcache, regno);
3927 if (targetdebug)
3928 {
3929 debug_print_register ("target_store_registers", regcache, regno);
3930 }
3931 }
3932
3933 int
3934 target_core_of_thread (ptid_t ptid)
3935 {
3936 struct target_ops *t;
3937
3938 for (t = current_target.beneath; t != NULL; t = t->beneath)
3939 {
3940 if (t->to_core_of_thread != NULL)
3941 {
3942 int retval = t->to_core_of_thread (t, ptid);
3943
3944 if (targetdebug)
3945 fprintf_unfiltered (gdb_stdlog,
3946 "target_core_of_thread (%d) = %d\n",
3947 ptid_get_pid (ptid), retval);
3948 return retval;
3949 }
3950 }
3951
3952 return -1;
3953 }
3954
3955 int
3956 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3957 {
3958 struct target_ops *t;
3959
3960 for (t = current_target.beneath; t != NULL; t = t->beneath)
3961 {
3962 if (t->to_verify_memory != NULL)
3963 {
3964 int retval = t->to_verify_memory (t, data, memaddr, size);
3965
3966 if (targetdebug)
3967 fprintf_unfiltered (gdb_stdlog,
3968 "target_verify_memory (%s, %s) = %d\n",
3969 paddress (target_gdbarch (), memaddr),
3970 pulongest (size),
3971 retval);
3972 return retval;
3973 }
3974 }
3975
3976 tcomplain ();
3977 }
3978
3979 /* The documentation for this function is in its prototype declaration in
3980 target.h. */
3981
3982 int
3983 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3984 {
3985 struct target_ops *t;
3986
3987 for (t = current_target.beneath; t != NULL; t = t->beneath)
3988 if (t->to_insert_mask_watchpoint != NULL)
3989 {
3990 int ret;
3991
3992 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3993
3994 if (targetdebug)
3995 fprintf_unfiltered (gdb_stdlog, "\
3996 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3997 core_addr_to_string (addr),
3998 core_addr_to_string (mask), rw, ret);
3999
4000 return ret;
4001 }
4002
4003 return 1;
4004 }
4005
4006 /* The documentation for this function is in its prototype declaration in
4007 target.h. */
4008
4009 int
4010 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4011 {
4012 struct target_ops *t;
4013
4014 for (t = current_target.beneath; t != NULL; t = t->beneath)
4015 if (t->to_remove_mask_watchpoint != NULL)
4016 {
4017 int ret;
4018
4019 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4020
4021 if (targetdebug)
4022 fprintf_unfiltered (gdb_stdlog, "\
4023 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4024 core_addr_to_string (addr),
4025 core_addr_to_string (mask), rw, ret);
4026
4027 return ret;
4028 }
4029
4030 return 1;
4031 }
4032
4033 /* The documentation for this function is in its prototype declaration
4034 in target.h. */
4035
4036 int
4037 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4038 {
4039 struct target_ops *t;
4040
4041 for (t = current_target.beneath; t != NULL; t = t->beneath)
4042 if (t->to_masked_watch_num_registers != NULL)
4043 return t->to_masked_watch_num_registers (t, addr, mask);
4044
4045 return -1;
4046 }
4047
4048 /* The documentation for this function is in its prototype declaration
4049 in target.h. */
4050
4051 int
4052 target_ranged_break_num_registers (void)
4053 {
4054 struct target_ops *t;
4055
4056 for (t = current_target.beneath; t != NULL; t = t->beneath)
4057 if (t->to_ranged_break_num_registers != NULL)
4058 return t->to_ranged_break_num_registers (t);
4059
4060 return -1;
4061 }
4062
4063 /* See target.h. */
4064
4065 struct btrace_target_info *
4066 target_enable_btrace (ptid_t ptid)
4067 {
4068 struct target_ops *t;
4069
4070 for (t = current_target.beneath; t != NULL; t = t->beneath)
4071 if (t->to_enable_btrace != NULL)
4072 return t->to_enable_btrace (t, ptid);
4073
4074 tcomplain ();
4075 return NULL;
4076 }
4077
4078 /* See target.h. */
4079
4080 void
4081 target_disable_btrace (struct btrace_target_info *btinfo)
4082 {
4083 struct target_ops *t;
4084
4085 for (t = current_target.beneath; t != NULL; t = t->beneath)
4086 if (t->to_disable_btrace != NULL)
4087 {
4088 t->to_disable_btrace (t, btinfo);
4089 return;
4090 }
4091
4092 tcomplain ();
4093 }
4094
4095 /* See target.h. */
4096
4097 void
4098 target_teardown_btrace (struct btrace_target_info *btinfo)
4099 {
4100 struct target_ops *t;
4101
4102 for (t = current_target.beneath; t != NULL; t = t->beneath)
4103 if (t->to_teardown_btrace != NULL)
4104 {
4105 t->to_teardown_btrace (t, btinfo);
4106 return;
4107 }
4108
4109 tcomplain ();
4110 }
4111
4112 /* See target.h. */
4113
4114 enum btrace_error
4115 target_read_btrace (VEC (btrace_block_s) **btrace,
4116 struct btrace_target_info *btinfo,
4117 enum btrace_read_type type)
4118 {
4119 struct target_ops *t;
4120
4121 for (t = current_target.beneath; t != NULL; t = t->beneath)
4122 if (t->to_read_btrace != NULL)
4123 return t->to_read_btrace (t, btrace, btinfo, type);
4124
4125 tcomplain ();
4126 return BTRACE_ERR_NOT_SUPPORTED;
4127 }
4128
4129 /* See target.h. */
4130
4131 void
4132 target_stop_recording (void)
4133 {
4134 struct target_ops *t;
4135
4136 for (t = current_target.beneath; t != NULL; t = t->beneath)
4137 if (t->to_stop_recording != NULL)
4138 {
4139 t->to_stop_recording (t);
4140 return;
4141 }
4142
4143 /* This is optional. */
4144 }
4145
4146 /* See target.h. */
4147
4148 void
4149 target_info_record (void)
4150 {
4151 struct target_ops *t;
4152
4153 for (t = current_target.beneath; t != NULL; t = t->beneath)
4154 if (t->to_info_record != NULL)
4155 {
4156 t->to_info_record (t);
4157 return;
4158 }
4159
4160 tcomplain ();
4161 }
4162
4163 /* See target.h. */
4164
4165 void
4166 target_save_record (const char *filename)
4167 {
4168 struct target_ops *t;
4169
4170 for (t = current_target.beneath; t != NULL; t = t->beneath)
4171 if (t->to_save_record != NULL)
4172 {
4173 t->to_save_record (t, filename);
4174 return;
4175 }
4176
4177 tcomplain ();
4178 }
4179
4180 /* See target.h. */
4181
4182 int
4183 target_supports_delete_record (void)
4184 {
4185 struct target_ops *t;
4186
4187 for (t = current_target.beneath; t != NULL; t = t->beneath)
4188 if (t->to_delete_record != NULL)
4189 return 1;
4190
4191 return 0;
4192 }
4193
4194 /* See target.h. */
4195
4196 void
4197 target_delete_record (void)
4198 {
4199 struct target_ops *t;
4200
4201 for (t = current_target.beneath; t != NULL; t = t->beneath)
4202 if (t->to_delete_record != NULL)
4203 {
4204 t->to_delete_record (t);
4205 return;
4206 }
4207
4208 tcomplain ();
4209 }
4210
4211 /* See target.h. */
4212
4213 int
4214 target_record_is_replaying (void)
4215 {
4216 struct target_ops *t;
4217
4218 for (t = current_target.beneath; t != NULL; t = t->beneath)
4219 if (t->to_record_is_replaying != NULL)
4220 return t->to_record_is_replaying (t);
4221
4222 return 0;
4223 }
4224
4225 /* See target.h. */
4226
4227 void
4228 target_goto_record_begin (void)
4229 {
4230 struct target_ops *t;
4231
4232 for (t = current_target.beneath; t != NULL; t = t->beneath)
4233 if (t->to_goto_record_begin != NULL)
4234 {
4235 t->to_goto_record_begin (t);
4236 return;
4237 }
4238
4239 tcomplain ();
4240 }
4241
4242 /* See target.h. */
4243
4244 void
4245 target_goto_record_end (void)
4246 {
4247 struct target_ops *t;
4248
4249 for (t = current_target.beneath; t != NULL; t = t->beneath)
4250 if (t->to_goto_record_end != NULL)
4251 {
4252 t->to_goto_record_end (t);
4253 return;
4254 }
4255
4256 tcomplain ();
4257 }
4258
4259 /* See target.h. */
4260
4261 void
4262 target_goto_record (ULONGEST insn)
4263 {
4264 struct target_ops *t;
4265
4266 for (t = current_target.beneath; t != NULL; t = t->beneath)
4267 if (t->to_goto_record != NULL)
4268 {
4269 t->to_goto_record (t, insn);
4270 return;
4271 }
4272
4273 tcomplain ();
4274 }
4275
4276 /* See target.h. */
4277
4278 void
4279 target_insn_history (int size, int flags)
4280 {
4281 struct target_ops *t;
4282
4283 for (t = current_target.beneath; t != NULL; t = t->beneath)
4284 if (t->to_insn_history != NULL)
4285 {
4286 t->to_insn_history (t, size, flags);
4287 return;
4288 }
4289
4290 tcomplain ();
4291 }
4292
4293 /* See target.h. */
4294
4295 void
4296 target_insn_history_from (ULONGEST from, int size, int flags)
4297 {
4298 struct target_ops *t;
4299
4300 for (t = current_target.beneath; t != NULL; t = t->beneath)
4301 if (t->to_insn_history_from != NULL)
4302 {
4303 t->to_insn_history_from (t, from, size, flags);
4304 return;
4305 }
4306
4307 tcomplain ();
4308 }
4309
4310 /* See target.h. */
4311
4312 void
4313 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4314 {
4315 struct target_ops *t;
4316
4317 for (t = current_target.beneath; t != NULL; t = t->beneath)
4318 if (t->to_insn_history_range != NULL)
4319 {
4320 t->to_insn_history_range (t, begin, end, flags);
4321 return;
4322 }
4323
4324 tcomplain ();
4325 }
4326
4327 /* See target.h. */
4328
4329 void
4330 target_call_history (int size, int flags)
4331 {
4332 struct target_ops *t;
4333
4334 for (t = current_target.beneath; t != NULL; t = t->beneath)
4335 if (t->to_call_history != NULL)
4336 {
4337 t->to_call_history (t, size, flags);
4338 return;
4339 }
4340
4341 tcomplain ();
4342 }
4343
4344 /* See target.h. */
4345
4346 void
4347 target_call_history_from (ULONGEST begin, int size, int flags)
4348 {
4349 struct target_ops *t;
4350
4351 for (t = current_target.beneath; t != NULL; t = t->beneath)
4352 if (t->to_call_history_from != NULL)
4353 {
4354 t->to_call_history_from (t, begin, size, flags);
4355 return;
4356 }
4357
4358 tcomplain ();
4359 }
4360
4361 /* See target.h. */
4362
4363 void
4364 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4365 {
4366 struct target_ops *t;
4367
4368 for (t = current_target.beneath; t != NULL; t = t->beneath)
4369 if (t->to_call_history_range != NULL)
4370 {
4371 t->to_call_history_range (t, begin, end, flags);
4372 return;
4373 }
4374
4375 tcomplain ();
4376 }
4377
/* Debug wrapper for to_prepare_to_store: forward the call, then log
   it.  */

static void
debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
{
  debug_target.to_prepare_to_store (&debug_target, regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4385
4386 /* See target.h. */
4387
4388 const struct frame_unwind *
4389 target_get_unwinder (void)
4390 {
4391 struct target_ops *t;
4392
4393 for (t = current_target.beneath; t != NULL; t = t->beneath)
4394 if (t->to_get_unwinder != NULL)
4395 return t->to_get_unwinder;
4396
4397 return NULL;
4398 }
4399
4400 /* See target.h. */
4401
4402 const struct frame_unwind *
4403 target_get_tailcall_unwinder (void)
4404 {
4405 struct target_ops *t;
4406
4407 for (t = current_target.beneath; t != NULL; t = t->beneath)
4408 if (t->to_get_tailcall_unwinder != NULL)
4409 return t->to_get_tailcall_unwinder;
4410
4411 return NULL;
4412 }
4413
4414 /* See target.h. */
4415
4416 CORE_ADDR
4417 forward_target_decr_pc_after_break (struct target_ops *ops,
4418 struct gdbarch *gdbarch)
4419 {
4420 for (; ops != NULL; ops = ops->beneath)
4421 if (ops->to_decr_pc_after_break != NULL)
4422 return ops->to_decr_pc_after_break (ops, gdbarch);
4423
4424 return gdbarch_decr_pc_after_break (gdbarch);
4425 }
4426
4427 /* See target.h. */
4428
4429 CORE_ADDR
4430 target_decr_pc_after_break (struct gdbarch *gdbarch)
4431 {
4432 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4433 }
4434
/* Debug wrapper for the deprecated memory-transfer method: forward
   the call, then log the request and (abbreviated) data.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line whenever the buffer address is 16-byte
	     aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Abbreviate the dump unless extra-verbose debugging
		 was requested.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4475
/* Debug wrapper for to_files_info: forward the call, then log it.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4483
4484 static int
4485 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4486 struct bp_target_info *bp_tgt)
4487 {
4488 int retval;
4489
4490 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4491
4492 fprintf_unfiltered (gdb_stdlog,
4493 "target_insert_breakpoint (%s, xxx) = %ld\n",
4494 core_addr_to_string (bp_tgt->placed_address),
4495 (unsigned long) retval);
4496 return retval;
4497 }
4498
4499 static int
4500 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4501 struct bp_target_info *bp_tgt)
4502 {
4503 int retval;
4504
4505 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4506
4507 fprintf_unfiltered (gdb_stdlog,
4508 "target_remove_breakpoint (%s, xxx) = %ld\n",
4509 core_addr_to_string (bp_tgt->placed_address),
4510 (unsigned long) retval);
4511 return retval;
4512 }
4513
4514 static int
4515 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4516 int type, int cnt, int from_tty)
4517 {
4518 int retval;
4519
4520 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4521 type, cnt, from_tty);
4522
4523 fprintf_unfiltered (gdb_stdlog,
4524 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4525 (unsigned long) type,
4526 (unsigned long) cnt,
4527 (unsigned long) from_tty,
4528 (unsigned long) retval);
4529 return retval;
4530 }
4531
4532 static int
4533 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4534 CORE_ADDR addr, int len)
4535 {
4536 CORE_ADDR retval;
4537
4538 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4539 addr, len);
4540
4541 fprintf_unfiltered (gdb_stdlog,
4542 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4543 core_addr_to_string (addr), (unsigned long) len,
4544 core_addr_to_string (retval));
4545 return retval;
4546 }
4547
4548 static int
4549 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4550 CORE_ADDR addr, int len, int rw,
4551 struct expression *cond)
4552 {
4553 int retval;
4554
4555 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4556 addr, len,
4557 rw, cond);
4558
4559 fprintf_unfiltered (gdb_stdlog,
4560 "target_can_accel_watchpoint_condition "
4561 "(%s, %d, %d, %s) = %ld\n",
4562 core_addr_to_string (addr), len, rw,
4563 host_address_to_string (cond), (unsigned long) retval);
4564 return retval;
4565 }
4566
4567 static int
4568 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4569 {
4570 int retval;
4571
4572 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4573
4574 fprintf_unfiltered (gdb_stdlog,
4575 "target_stopped_by_watchpoint () = %ld\n",
4576 (unsigned long) retval);
4577 return retval;
4578 }
4579
4580 static int
4581 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4582 {
4583 int retval;
4584
4585 retval = debug_target.to_stopped_data_address (target, addr);
4586
4587 fprintf_unfiltered (gdb_stdlog,
4588 "target_stopped_data_address ([%s]) = %ld\n",
4589 core_addr_to_string (*addr),
4590 (unsigned long)retval);
4591 return retval;
4592 }
4593
4594 static int
4595 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4596 CORE_ADDR addr,
4597 CORE_ADDR start, int length)
4598 {
4599 int retval;
4600
4601 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4602 start, length);
4603
4604 fprintf_filtered (gdb_stdlog,
4605 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4606 core_addr_to_string (addr), core_addr_to_string (start),
4607 length, retval);
4608 return retval;
4609 }
4610
4611 static int
4612 debug_to_insert_hw_breakpoint (struct target_ops *self,
4613 struct gdbarch *gdbarch,
4614 struct bp_target_info *bp_tgt)
4615 {
4616 int retval;
4617
4618 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4619 gdbarch, bp_tgt);
4620
4621 fprintf_unfiltered (gdb_stdlog,
4622 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4623 core_addr_to_string (bp_tgt->placed_address),
4624 (unsigned long) retval);
4625 return retval;
4626 }
4627
4628 static int
4629 debug_to_remove_hw_breakpoint (struct target_ops *self,
4630 struct gdbarch *gdbarch,
4631 struct bp_target_info *bp_tgt)
4632 {
4633 int retval;
4634
4635 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4636 gdbarch, bp_tgt);
4637
4638 fprintf_unfiltered (gdb_stdlog,
4639 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4640 core_addr_to_string (bp_tgt->placed_address),
4641 (unsigned long) retval);
4642 return retval;
4643 }
4644
4645 static int
4646 debug_to_insert_watchpoint (struct target_ops *self,
4647 CORE_ADDR addr, int len, int type,
4648 struct expression *cond)
4649 {
4650 int retval;
4651
4652 retval = debug_target.to_insert_watchpoint (&debug_target,
4653 addr, len, type, cond);
4654
4655 fprintf_unfiltered (gdb_stdlog,
4656 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4657 core_addr_to_string (addr), len, type,
4658 host_address_to_string (cond), (unsigned long) retval);
4659 return retval;
4660 }
4661
4662 static int
4663 debug_to_remove_watchpoint (struct target_ops *self,
4664 CORE_ADDR addr, int len, int type,
4665 struct expression *cond)
4666 {
4667 int retval;
4668
4669 retval = debug_target.to_remove_watchpoint (&debug_target,
4670 addr, len, type, cond);
4671
4672 fprintf_unfiltered (gdb_stdlog,
4673 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4674 core_addr_to_string (addr), len, type,
4675 host_address_to_string (cond), (unsigned long) retval);
4676 return retval;
4677 }
4678
/* Debug wrapper: forward to_terminal_init, then log the call.  */

static void
debug_to_terminal_init (struct target_ops *self)
{
  debug_target.to_terminal_init (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4686
/* Debug wrapper: forward to_terminal_inferior, then log the call.  */

static void
debug_to_terminal_inferior (struct target_ops *self)
{
  debug_target.to_terminal_inferior (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4694
/* Debug wrapper: forward to_terminal_ours_for_output, then log the
   call.  */

static void
debug_to_terminal_ours_for_output (struct target_ops *self)
{
  debug_target.to_terminal_ours_for_output (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4702
/* Debug wrapper: forward to_terminal_ours, then log the call.  */

static void
debug_to_terminal_ours (struct target_ops *self)
{
  debug_target.to_terminal_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4710
/* Debug wrapper: forward to_terminal_save_ours, then log the call.  */

static void
debug_to_terminal_save_ours (struct target_ops *self)
{
  debug_target.to_terminal_save_ours (&debug_target);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4718
4719 static void
4720 debug_to_terminal_info (struct target_ops *self,
4721 const char *arg, int from_tty)
4722 {
4723 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4724
4725 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4726 from_tty);
4727 }
4728
/* Debug wrapper: forward to_load, then log the call.
   NOTE(review): ARGS is logged with %s; confirm callers never pass
   NULL here.  */

static void
debug_to_load (struct target_ops *self, char *args, int from_tty)
{
  debug_target.to_load (&debug_target, args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4736
/* Debug wrapper: forward to_post_startup_inferior, then log the pid
   of the new inferior.  */

static void
debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_post_startup_inferior (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      ptid_get_pid (ptid));
}
4745
4746 static int
4747 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4748 {
4749 int retval;
4750
4751 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4752
4753 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4754 pid, retval);
4755
4756 return retval;
4757 }
4758
4759 static int
4760 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4761 {
4762 int retval;
4763
4764 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4765
4766 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4767 pid, retval);
4768
4769 return retval;
4770 }
4771
4772 static int
4773 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4774 {
4775 int retval;
4776
4777 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4778
4779 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4780 pid, retval);
4781
4782 return retval;
4783 }
4784
4785 static int
4786 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4787 {
4788 int retval;
4789
4790 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4791
4792 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4793 pid, retval);
4794
4795 return retval;
4796 }
4797
4798 static int
4799 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4800 {
4801 int retval;
4802
4803 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4804
4805 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4806 pid, retval);
4807
4808 return retval;
4809 }
4810
4811 static int
4812 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4813 {
4814 int retval;
4815
4816 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4817
4818 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4819 pid, retval);
4820
4821 return retval;
4822 }
4823
4824 static int
4825 debug_to_has_exited (struct target_ops *self,
4826 int pid, int wait_status, int *exit_status)
4827 {
4828 int has_exited;
4829
4830 has_exited = debug_target.to_has_exited (&debug_target,
4831 pid, wait_status, exit_status);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4834 pid, wait_status, *exit_status, has_exited);
4835
4836 return has_exited;
4837 }
4838
4839 static int
4840 debug_to_can_run (struct target_ops *self)
4841 {
4842 int retval;
4843
4844 retval = debug_target.to_can_run (&debug_target);
4845
4846 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4847
4848 return retval;
4849 }
4850
/* Debug wrapper for to_thread_architecture: forward the query and log
   the resulting architecture.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  /* NOTE(review): unlike the other wrappers here, this forwards OPS
     rather than &debug_target -- confirm this is intentional.  */
  retval = debug_target.to_thread_architecture (ops, ptid);

  /* NOTE(review): assumes RETVAL is non-NULL; gdbarch_bfd_arch_info
     would fault otherwise.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
4865
/* Debug wrapper: forward to_stop, then log which ptid was stopped.  */

static void
debug_to_stop (struct target_ops *self, ptid_t ptid)
{
  debug_target.to_stop (&debug_target, ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4874
/* Debug wrapper: forward to_rcmd, then log the command that was sent
   (the output stream contents are not logged).  */

static void
debug_to_rcmd (struct target_ops *self, char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (&debug_target, command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4882
4883 static char *
4884 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4885 {
4886 char *exec_file;
4887
4888 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4889
4890 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4891 pid, exec_file);
4892
4893 return exec_file;
4894 }
4895
/* Install the debug_to_* logging wrappers: save a copy of the current
   target vector into debug_target, then overwrite the methods of
   current_target with wrappers that forward to the saved copy and log
   each call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Snapshot the real vector first; the wrappers below call through
     this copy.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4943 \f
4944
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4949
/* Default to_rcmd implementation for targets with no remote monitor:
   report that "monitor" is unsupported.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4955
4956 static void
4957 do_monitor_command (char *cmd,
4958 int from_tty)
4959 {
4960 target_rcmd (cmd, gdb_stdtarg);
4961 }
4962
4963 /* Print the name of each layers of our target stack. */
4964
4965 static void
4966 maintenance_print_target_stack (char *cmd, int from_tty)
4967 {
4968 struct target_ops *t;
4969
4970 printf_filtered (_("The current target stack is:\n"));
4971
4972 for (t = target_stack; t != NULL; t = t->beneath)
4973 {
4974 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4975 }
4976 }
4977
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (see
   set_target_async_command below, which syncs the two).  */
static int target_async_permitted_1 = 0;
4984
/* The "set target-async" callback.  Refuses to change the setting
   while there are live inferiors; otherwise commits the staged value
   to target_async_permitted.  */

static void
set_target_async_command (char *args, int from_tty,
			  struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Roll the staging variable back so "show" reflects the value
	 actually in effect; error does not return.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4997
/* The "show target-async" callback: print the current setting.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
5007
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the real may_* flags are only updated when the change
   is accepted (see set_target_permissions).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
5016
/* Make the user-set values match the real values again.  Used to roll
   back the staging variables when a change is rejected.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
5029
/* The one function handles (most of) the permission flags in the same
   way.  Rejects changes while the inferior is executing; otherwise
   commits the staged values.  may_write_memory is deliberately not
   handled here -- it has its own callback, set_write_memory_permission
   below, so it can be changed independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Undo the staged change so "show" stays truthful; error does
	 not return.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
5051
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this is allowed even while the inferior is
   running.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
5062
5063
/* Module initialization: install the permanent dummy target at the
   bottom of the target stack and register all target-related commands
   and settings.  */

void
initialize_targets (void)
{
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* The "may-*" permission settings below stage their values in the
     *_1 variables; the callbacks commit or roll back the change.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.127563 seconds and 5 git commands to generate.