Provide default target methods for record targets that are likely to be shared
gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_terminal_info (char *, int);
146
147 static void debug_to_load (char *, int);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_stop (ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_index;
159 unsigned target_struct_allocsize;
160 #define DEFAULT_ALLOCSIZE 10
161
162 /* The initial current target, so that there is always a semi-valid
163 current target. */
164
165 static struct target_ops dummy_target;
166
167 /* Top of target stack. */
168
169 static struct target_ops *target_stack;
170
171 /* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174 struct target_ops current_target;
175
176 /* Command list for target. */
177
178 static struct cmd_list_element *targetlist = NULL;
179
180 /* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183 static int trust_readonly = 0;
184
185 /* Nonzero if we should show true memory content including
186    memory breakpoints inserted by GDB.  */
187
188 static int show_memory_breakpoints = 0;
189
190 /* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192    inadvertent disruption, such as in non-stop mode.  */
193
194 int may_write_registers = 1;
195
196 int may_write_memory = 1;
197
198 int may_insert_breakpoints = 1;
199
200 int may_insert_tracepoints = 1;
201
202 int may_insert_fast_tracepoints = 1;
203
204 int may_stop = 1;
205
206 /* Non-zero if we want to see trace of target level stuff. */
207
208 static unsigned int targetdebug = 0;
209 static void
210 show_targetdebug (struct ui_file *file, int from_tty,
211 struct cmd_list_element *c, const char *value)
212 {
213 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
214 }
215
216 static void setup_target_debug (void);
217
218 /* The option sets this. */
219 static int stack_cache_enabled_p_1 = 1;
220 /* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223 static int stack_cache_enabled_p = 1;
224
225 /* This is called *after* the stack-cache has been set.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230 static void
231 set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233 {
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238 }
239
240 static void
241 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
242 struct cmd_list_element *c, const char *value)
243 {
244 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
245 }
246
247 /* Cache of memory operations, to speed up remote access. */
248 static DCACHE *target_dcache;
249
250 /* Invalidate the target dcache. */
251
252 void
253 target_dcache_invalidate (void)
254 {
255 dcache_invalidate (target_dcache);
256 }
257
258 /* The user just typed 'target' without the name of a target. */
259
260 static void
261 target_command (char *arg, int from_tty)
262 {
263 fputs_filtered ("Argument required (target name). Try `help target'\n",
264 gdb_stdout);
265 }
266
267 /* Default target_has_* methods for process_stratum targets. */
268
269 int
270 default_child_has_all_memory (struct target_ops *ops)
271 {
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_memory (struct target_ops *ops)
281 {
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289 int
290 default_child_has_stack (struct target_ops *ops)
291 {
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297 }
298
299 int
300 default_child_has_registers (struct target_ops *ops)
301 {
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307 }
308
309 int
310 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311 {
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318 }
319
320
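/* The target_has_*_1 functions below walk the target stack, starting
   beneath the flattened current_target, and return nonzero as soon as
   some target on the stack provides the corresponding capability.  */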
321 int
322 target_has_all_memory_1 (void)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331 }
332
333 int
334 target_has_memory_1 (void)
335 {
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343 }
344
345 int
346 target_has_stack_1 (void)
347 {
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355 }
356
357 int
358 target_has_registers_1 (void)
359 {
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367 }
368
369 int
370 target_has_execution_1 (ptid_t the_ptid)
371 {
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379 }
380
381 int
382 target_has_execution_current (void)
383 {
384 return target_has_execution_1 (inferior_ptid);
385 }
386
387 /* Add a possible target architecture to the list. */
388
389 void
390 add_target (struct target_ops *t)
391 {
392 /* Provide default values for all "must have" methods. */
393 if (t->to_xfer_partial == NULL)
394 t->to_xfer_partial = default_xfer_partial;
395
396 if (t->to_has_all_memory == NULL)
397 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
398
399 if (t->to_has_memory == NULL)
400 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_stack == NULL)
403 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_registers == NULL)
406 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_execution == NULL)
409 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
410
411 if (!target_structs)
412 {
413 target_struct_allocsize = DEFAULT_ALLOCSIZE;
414 target_structs = (struct target_ops **) xmalloc
415 (target_struct_allocsize * sizeof (*target_structs));
416 }
417 if (target_struct_size >= target_struct_allocsize)
418 {
419 target_struct_allocsize *= 2;
420 target_structs = (struct target_ops **)
421 xrealloc ((char *) target_structs,
422 target_struct_allocsize * sizeof (*target_structs));
423 }
424 target_structs[target_struct_size++] = t;
425
426 if (targetlist == NULL)
427 add_prefix_cmd ("target", class_run, target_command, _("\
428 Connect to a target machine or process.\n\
429 The first argument is the type or protocol of the target machine.\n\
430 Remaining arguments are interpreted by the target protocol. For more\n\
431 information on the arguments for a particular protocol, type\n\
432 `help target ' followed by the protocol name."),
433 &targetlist, "target ", 0, &cmdlist);
434 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
435 }
436
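/* Illustrative sketch (not part of the original sources): a minimal
   target could rely on the defaults that add_target and
   update_current_target install, providing little more than its
   identification fields and an open routine.  The "example" names
   below are made up; the field names are the ones used elsewhere in
   this file.

     static void
     example_open (char *args, int from_tty)
     {
       ...
     }

     static struct target_ops example_ops;

     static void
     init_example_ops (void)
     {
       example_ops.to_shortname = "example";
       example_ops.to_longname = "Example target";
       example_ops.to_doc = "A hypothetical target illustrating add_target.";
       example_ops.to_open = example_open;
       example_ops.to_stratum = process_stratum;
       example_ops.to_magic = OPS_MAGIC;
       add_target (&example_ops);
     }  */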
437 /* See target.h. */
438
439 void
440 add_deprecated_target_alias (struct target_ops *t, char *alias)
441 {
442 struct cmd_list_element *c;
443 char *alt;
444
445   /* If we use add_alias_cmd here, we do not get the deprecated warning;
446      see PR cli/15104.  */
447 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
448 alt = xstrprintf ("target %s", t->to_shortname);
449 deprecate_cmd (c, alt);
450 }
451
452 /* Stub functions */
453
454 void
455 target_ignore (void)
456 {
457 }
458
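/* Kill the inferior: delegate to the first target on the stack that
   implements to_kill, or report that there is no process to debug.  */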
459 void
460 target_kill (void)
461 {
462 struct target_ops *t;
463
464 for (t = current_target.beneath; t != NULL; t = t->beneath)
465 if (t->to_kill != NULL)
466 {
467 if (targetdebug)
468 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
469
470 t->to_kill (t);
471 return;
472 }
473
474 noprocess ();
475 }
476
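/* Load a new program image: invalidate the memory cache first, then
   let the current target's to_load method do the work.  */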
477 void
478 target_load (char *arg, int from_tty)
479 {
480 target_dcache_invalidate ();
481 (*current_target.to_load) (arg, from_tty);
482 }
483
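/* Start a new inferior: delegate to the first target on the stack
   that implements to_create_inferior; it is an internal error if no
   target can.  */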
484 void
485 target_create_inferior (char *exec_file, char *args,
486 char **env, int from_tty)
487 {
488 struct target_ops *t;
489
490 for (t = current_target.beneath; t != NULL; t = t->beneath)
491 {
492 if (t->to_create_inferior != NULL)
493 {
494 t->to_create_inferior (t, exec_file, args, env, from_tty);
495 if (targetdebug)
496 fprintf_unfiltered (gdb_stdlog,
497 "target_create_inferior (%s, %s, xxx, %d)\n",
498 exec_file, args, from_tty);
499 return;
500 }
501 }
502
503 internal_error (__FILE__, __LINE__,
504 _("could not find a target to create inferior"));
505 }
506
507 void
508 target_terminal_inferior (void)
509 {
510 /* A background resume (``run&'') should leave GDB in control of the
511 terminal. Use target_can_async_p, not target_is_async_p, since at
512 this point the target is not async yet. However, if sync_execution
513 is not set, we know it will become async prior to resume. */
514 if (target_can_async_p () && !sync_execution)
515 return;
516
517 /* If GDB is resuming the inferior in the foreground, install
518 inferior's terminal modes. */
519 (*current_target.to_terminal_inferior) ();
520 }
521
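/* Default memory transfer routine for targets that cannot access
   memory: flag the request as an I/O error and report zero bytes
   handled.  */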
522 static int
523 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
524 struct target_ops *t)
525 {
526 errno = EIO; /* Can't read/write this location. */
527 return 0; /* No bytes handled. */
528 }
529
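/* Complain that the current target cannot perform the requested
   operation.  */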
530 static void
531 tcomplain (void)
532 {
533 error (_("You can't do that when your target is `%s'"),
534 current_target.to_shortname);
535 }
536
537 void
538 noprocess (void)
539 {
540 error (_("You can't do that without a process to debug."));
541 }
542
543 static void
544 default_terminal_info (char *args, int from_tty)
545 {
546 printf_unfiltered (_("No saved terminal information.\n"));
547 }
548
549 /* A default implementation for the to_get_ada_task_ptid target method.
550
551 This function builds the PTID by using both LWP and TID as part of
552 the PTID lwp and tid elements. The pid used is the pid of the
553 inferior_ptid. */
554
555 static ptid_t
556 default_get_ada_task_ptid (long lwp, long tid)
557 {
558 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
559 }
560
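/* Default to_execution_direction method: targets that cannot execute
   in reverse, or cannot run asynchronously, always run forward;
   reverse async targets are required to supply their own method.  */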
561 static enum exec_direction_kind
562 default_execution_direction (void)
563 {
564 if (!target_can_execute_reverse)
565 return EXEC_FORWARD;
566 else if (!target_can_async_p ())
567 return EXEC_FORWARD;
568 else
569 gdb_assert_not_reached ("\
570 to_execution_direction must be implemented for reverse async");
571 }
572
573 /* Go through the target stack from top to bottom, copying over zero
574 entries in current_target, then filling in still empty entries. In
575 effect, we are doing class inheritance through the pushed target
576 vectors.
577
578 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
579 is currently implemented, is that it discards any knowledge of
580 which target an inherited method originally belonged to.
581    Consequently, new target methods should instead explicitly and
582 locally search the target stack for the target that can handle the
583 request. */
584
585 static void
586 update_current_target (void)
587 {
588 struct target_ops *t;
589
590 /* First, reset current's contents. */
591 memset (&current_target, 0, sizeof (current_target));
592
593 #define INHERIT(FIELD, TARGET) \
594 if (!current_target.FIELD) \
595 current_target.FIELD = (TARGET)->FIELD
596
597 for (t = target_stack; t; t = t->beneath)
598 {
599 INHERIT (to_shortname, t);
600 INHERIT (to_longname, t);
601 INHERIT (to_doc, t);
602 /* Do not inherit to_open. */
603 /* Do not inherit to_close. */
604 /* Do not inherit to_attach. */
605 INHERIT (to_post_attach, t);
606 INHERIT (to_attach_no_wait, t);
607 /* Do not inherit to_detach. */
608 /* Do not inherit to_disconnect. */
609 /* Do not inherit to_resume. */
610 /* Do not inherit to_wait. */
611 /* Do not inherit to_fetch_registers. */
612 /* Do not inherit to_store_registers. */
613 INHERIT (to_prepare_to_store, t);
614 INHERIT (deprecated_xfer_memory, t);
615 INHERIT (to_files_info, t);
616 INHERIT (to_insert_breakpoint, t);
617 INHERIT (to_remove_breakpoint, t);
618 INHERIT (to_can_use_hw_breakpoint, t);
619 INHERIT (to_insert_hw_breakpoint, t);
620 INHERIT (to_remove_hw_breakpoint, t);
621 /* Do not inherit to_ranged_break_num_registers. */
622 INHERIT (to_insert_watchpoint, t);
623 INHERIT (to_remove_watchpoint, t);
624 /* Do not inherit to_insert_mask_watchpoint. */
625 /* Do not inherit to_remove_mask_watchpoint. */
626 INHERIT (to_stopped_data_address, t);
627 INHERIT (to_have_steppable_watchpoint, t);
628 INHERIT (to_have_continuable_watchpoint, t);
629 INHERIT (to_stopped_by_watchpoint, t);
630 INHERIT (to_watchpoint_addr_within_range, t);
631 INHERIT (to_region_ok_for_hw_watchpoint, t);
632 INHERIT (to_can_accel_watchpoint_condition, t);
633 /* Do not inherit to_masked_watch_num_registers. */
634 INHERIT (to_terminal_init, t);
635 INHERIT (to_terminal_inferior, t);
636 INHERIT (to_terminal_ours_for_output, t);
637 INHERIT (to_terminal_ours, t);
638 INHERIT (to_terminal_save_ours, t);
639 INHERIT (to_terminal_info, t);
640 /* Do not inherit to_kill. */
641 INHERIT (to_load, t);
642       /* Do not inherit to_create_inferior.  */
643 INHERIT (to_post_startup_inferior, t);
644 INHERIT (to_insert_fork_catchpoint, t);
645 INHERIT (to_remove_fork_catchpoint, t);
646 INHERIT (to_insert_vfork_catchpoint, t);
647 INHERIT (to_remove_vfork_catchpoint, t);
648 /* Do not inherit to_follow_fork. */
649 INHERIT (to_insert_exec_catchpoint, t);
650 INHERIT (to_remove_exec_catchpoint, t);
651 INHERIT (to_set_syscall_catchpoint, t);
652 INHERIT (to_has_exited, t);
653 /* Do not inherit to_mourn_inferior. */
654 INHERIT (to_can_run, t);
655 /* Do not inherit to_pass_signals. */
656 /* Do not inherit to_program_signals. */
657 /* Do not inherit to_thread_alive. */
658 /* Do not inherit to_find_new_threads. */
659 /* Do not inherit to_pid_to_str. */
660 INHERIT (to_extra_thread_info, t);
661 INHERIT (to_thread_name, t);
662 INHERIT (to_stop, t);
663 /* Do not inherit to_xfer_partial. */
664 INHERIT (to_rcmd, t);
665 INHERIT (to_pid_to_exec_file, t);
666 INHERIT (to_log_command, t);
667 INHERIT (to_stratum, t);
668 /* Do not inherit to_has_all_memory. */
669 /* Do not inherit to_has_memory. */
670 /* Do not inherit to_has_stack. */
671 /* Do not inherit to_has_registers. */
672 /* Do not inherit to_has_execution. */
673 INHERIT (to_has_thread_control, t);
674 INHERIT (to_can_async_p, t);
675 INHERIT (to_is_async_p, t);
676 INHERIT (to_async, t);
677 INHERIT (to_find_memory_regions, t);
678 INHERIT (to_make_corefile_notes, t);
679 INHERIT (to_get_bookmark, t);
680 INHERIT (to_goto_bookmark, t);
681 /* Do not inherit to_get_thread_local_address. */
682 INHERIT (to_can_execute_reverse, t);
683 INHERIT (to_execution_direction, t);
684 INHERIT (to_thread_architecture, t);
685 /* Do not inherit to_read_description. */
686 INHERIT (to_get_ada_task_ptid, t);
687 /* Do not inherit to_search_memory. */
688 INHERIT (to_supports_multi_process, t);
689 INHERIT (to_supports_enable_disable_tracepoint, t);
690 INHERIT (to_supports_string_tracing, t);
691 INHERIT (to_trace_init, t);
692 INHERIT (to_download_tracepoint, t);
693 INHERIT (to_can_download_tracepoint, t);
694 INHERIT (to_download_trace_state_variable, t);
695 INHERIT (to_enable_tracepoint, t);
696 INHERIT (to_disable_tracepoint, t);
697 INHERIT (to_trace_set_readonly_regions, t);
698 INHERIT (to_trace_start, t);
699 INHERIT (to_get_trace_status, t);
700 INHERIT (to_get_tracepoint_status, t);
701 INHERIT (to_trace_stop, t);
702 INHERIT (to_trace_find, t);
703 INHERIT (to_get_trace_state_variable_value, t);
704 INHERIT (to_save_trace_data, t);
705 INHERIT (to_upload_tracepoints, t);
706 INHERIT (to_upload_trace_state_variables, t);
707 INHERIT (to_get_raw_trace_data, t);
708 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
709 INHERIT (to_set_disconnected_tracing, t);
710 INHERIT (to_set_circular_trace_buffer, t);
711 INHERIT (to_set_trace_buffer_size, t);
712 INHERIT (to_set_trace_notes, t);
713 INHERIT (to_get_tib_address, t);
714 INHERIT (to_set_permissions, t);
715 INHERIT (to_static_tracepoint_marker_at, t);
716 INHERIT (to_static_tracepoint_markers_by_strid, t);
717 INHERIT (to_traceframe_info, t);
718 INHERIT (to_use_agent, t);
719 INHERIT (to_can_use_agent, t);
720 INHERIT (to_magic, t);
721 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
722 INHERIT (to_can_run_breakpoint_commands, t);
723 /* Do not inherit to_memory_map. */
724 /* Do not inherit to_flash_erase. */
725 /* Do not inherit to_flash_done. */
726 }
727 #undef INHERIT
728
729 /* Clean up a target struct so it no longer has any zero pointers in
730      it.  Some entries are defaulted to a method that prints an error,
731 others are hard-wired to a standard recursive default. */
732
733 #define de_fault(field, value) \
734 if (!current_target.field) \
735 current_target.field = value
736
737 de_fault (to_open,
738 (void (*) (char *, int))
739 tcomplain);
740 de_fault (to_close,
741 (void (*) (int))
742 target_ignore);
743 de_fault (to_post_attach,
744 (void (*) (int))
745 target_ignore);
746 de_fault (to_prepare_to_store,
747 (void (*) (struct regcache *))
748 noprocess);
749 de_fault (deprecated_xfer_memory,
750 (int (*) (CORE_ADDR, gdb_byte *, int, int,
751 struct mem_attrib *, struct target_ops *))
752 nomemory);
753 de_fault (to_files_info,
754 (void (*) (struct target_ops *))
755 target_ignore);
756 de_fault (to_insert_breakpoint,
757 memory_insert_breakpoint);
758 de_fault (to_remove_breakpoint,
759 memory_remove_breakpoint);
760 de_fault (to_can_use_hw_breakpoint,
761 (int (*) (int, int, int))
762 return_zero);
763 de_fault (to_insert_hw_breakpoint,
764 (int (*) (struct gdbarch *, struct bp_target_info *))
765 return_minus_one);
766 de_fault (to_remove_hw_breakpoint,
767 (int (*) (struct gdbarch *, struct bp_target_info *))
768 return_minus_one);
769 de_fault (to_insert_watchpoint,
770 (int (*) (CORE_ADDR, int, int, struct expression *))
771 return_minus_one);
772 de_fault (to_remove_watchpoint,
773 (int (*) (CORE_ADDR, int, int, struct expression *))
774 return_minus_one);
775 de_fault (to_stopped_by_watchpoint,
776 (int (*) (void))
777 return_zero);
778 de_fault (to_stopped_data_address,
779 (int (*) (struct target_ops *, CORE_ADDR *))
780 return_zero);
781 de_fault (to_watchpoint_addr_within_range,
782 default_watchpoint_addr_within_range);
783 de_fault (to_region_ok_for_hw_watchpoint,
784 default_region_ok_for_hw_watchpoint);
785 de_fault (to_can_accel_watchpoint_condition,
786 (int (*) (CORE_ADDR, int, int, struct expression *))
787 return_zero);
788 de_fault (to_terminal_init,
789 (void (*) (void))
790 target_ignore);
791 de_fault (to_terminal_inferior,
792 (void (*) (void))
793 target_ignore);
794 de_fault (to_terminal_ours_for_output,
795 (void (*) (void))
796 target_ignore);
797 de_fault (to_terminal_ours,
798 (void (*) (void))
799 target_ignore);
800 de_fault (to_terminal_save_ours,
801 (void (*) (void))
802 target_ignore);
803 de_fault (to_terminal_info,
804 default_terminal_info);
805 de_fault (to_load,
806 (void (*) (char *, int))
807 tcomplain);
808 de_fault (to_post_startup_inferior,
809 (void (*) (ptid_t))
810 target_ignore);
811 de_fault (to_insert_fork_catchpoint,
812 (int (*) (int))
813 return_one);
814 de_fault (to_remove_fork_catchpoint,
815 (int (*) (int))
816 return_one);
817 de_fault (to_insert_vfork_catchpoint,
818 (int (*) (int))
819 return_one);
820 de_fault (to_remove_vfork_catchpoint,
821 (int (*) (int))
822 return_one);
823 de_fault (to_insert_exec_catchpoint,
824 (int (*) (int))
825 return_one);
826 de_fault (to_remove_exec_catchpoint,
827 (int (*) (int))
828 return_one);
829 de_fault (to_set_syscall_catchpoint,
830 (int (*) (int, int, int, int, int *))
831 return_one);
832 de_fault (to_has_exited,
833 (int (*) (int, int, int *))
834 return_zero);
835 de_fault (to_can_run,
836 return_zero);
837 de_fault (to_extra_thread_info,
838 (char *(*) (struct thread_info *))
839 return_zero);
840 de_fault (to_thread_name,
841 (char *(*) (struct thread_info *))
842 return_zero);
843 de_fault (to_stop,
844 (void (*) (ptid_t))
845 target_ignore);
846 current_target.to_xfer_partial = current_xfer_partial;
847 de_fault (to_rcmd,
848 (void (*) (char *, struct ui_file *))
849 tcomplain);
850 de_fault (to_pid_to_exec_file,
851 (char *(*) (int))
852 return_zero);
853 de_fault (to_async,
854 (void (*) (void (*) (enum inferior_event_type, void*), void*))
855 tcomplain);
856 de_fault (to_thread_architecture,
857 default_thread_architecture);
858 current_target.to_read_description = NULL;
859 de_fault (to_get_ada_task_ptid,
860 (ptid_t (*) (long, long))
861 default_get_ada_task_ptid);
862 de_fault (to_supports_multi_process,
863 (int (*) (void))
864 return_zero);
865 de_fault (to_supports_enable_disable_tracepoint,
866 (int (*) (void))
867 return_zero);
868 de_fault (to_supports_string_tracing,
869 (int (*) (void))
870 return_zero);
871 de_fault (to_trace_init,
872 (void (*) (void))
873 tcomplain);
874 de_fault (to_download_tracepoint,
875 (void (*) (struct bp_location *))
876 tcomplain);
877 de_fault (to_can_download_tracepoint,
878 (int (*) (void))
879 return_zero);
880 de_fault (to_download_trace_state_variable,
881 (void (*) (struct trace_state_variable *))
882 tcomplain);
883 de_fault (to_enable_tracepoint,
884 (void (*) (struct bp_location *))
885 tcomplain);
886 de_fault (to_disable_tracepoint,
887 (void (*) (struct bp_location *))
888 tcomplain);
889 de_fault (to_trace_set_readonly_regions,
890 (void (*) (void))
891 tcomplain);
892 de_fault (to_trace_start,
893 (void (*) (void))
894 tcomplain);
895 de_fault (to_get_trace_status,
896 (int (*) (struct trace_status *))
897 return_minus_one);
898 de_fault (to_get_tracepoint_status,
899 (void (*) (struct breakpoint *, struct uploaded_tp *))
900 tcomplain);
901 de_fault (to_trace_stop,
902 (void (*) (void))
903 tcomplain);
904 de_fault (to_trace_find,
905 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
906 return_minus_one);
907 de_fault (to_get_trace_state_variable_value,
908 (int (*) (int, LONGEST *))
909 return_zero);
910 de_fault (to_save_trace_data,
911 (int (*) (const char *))
912 tcomplain);
913 de_fault (to_upload_tracepoints,
914 (int (*) (struct uploaded_tp **))
915 return_zero);
916 de_fault (to_upload_trace_state_variables,
917 (int (*) (struct uploaded_tsv **))
918 return_zero);
919 de_fault (to_get_raw_trace_data,
920 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
921 tcomplain);
922 de_fault (to_get_min_fast_tracepoint_insn_len,
923 (int (*) (void))
924 return_minus_one);
925 de_fault (to_set_disconnected_tracing,
926 (void (*) (int))
927 target_ignore);
928 de_fault (to_set_circular_trace_buffer,
929 (void (*) (int))
930 target_ignore);
931 de_fault (to_set_trace_buffer_size,
932 (void (*) (LONGEST))
933 target_ignore);
934 de_fault (to_set_trace_notes,
935 (int (*) (char *, char *, char *))
936 return_zero);
937 de_fault (to_get_tib_address,
938 (int (*) (ptid_t, CORE_ADDR *))
939 tcomplain);
940 de_fault (to_set_permissions,
941 (void (*) (void))
942 target_ignore);
943 de_fault (to_static_tracepoint_marker_at,
944 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
945 return_zero);
946 de_fault (to_static_tracepoint_markers_by_strid,
947 (VEC(static_tracepoint_marker_p) * (*) (const char *))
948 tcomplain);
949 de_fault (to_traceframe_info,
950 (struct traceframe_info * (*) (void))
951 tcomplain);
952 de_fault (to_supports_evaluation_of_breakpoint_conditions,
953 (int (*) (void))
954 return_zero);
955 de_fault (to_can_run_breakpoint_commands,
956 (int (*) (void))
957 return_zero);
958 de_fault (to_use_agent,
959 (int (*) (int))
960 tcomplain);
961 de_fault (to_can_use_agent,
962 (int (*) (void))
963 return_zero);
964 de_fault (to_execution_direction, default_execution_direction);
965
966 #undef de_fault
967
968 /* Finally, position the target-stack beneath the squashed
969 "current_target". That way code looking for a non-inherited
970 target method can quickly and simply find it. */
971 current_target.beneath = target_stack;
972
973 if (targetdebug)
974 setup_target_debug ();
975 }
976
977 /* Push a new target type into the stack of the existing target accessors,
978 possibly superseding some of the existing accessors.
979
980 Rather than allow an empty stack, we always have the dummy target at
981 the bottom stratum, so we can call the function vectors without
982 checking them. */
983
984 void
985 push_target (struct target_ops *t)
986 {
987 struct target_ops **cur;
988
989 /* Check magic number. If wrong, it probably means someone changed
990 the struct definition, but not all the places that initialize one. */
991 if (t->to_magic != OPS_MAGIC)
992 {
993 fprintf_unfiltered (gdb_stderr,
994 "Magic number of %s target struct wrong\n",
995 t->to_shortname);
996 internal_error (__FILE__, __LINE__,
997 _("failed internal consistency check"));
998 }
999
1000 /* Find the proper stratum to install this target in. */
1001 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1002 {
1003 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1004 break;
1005 }
1006
1007   /* If there are already targets at this stratum, remove them.  */
1008 /* FIXME: cagney/2003-10-15: I think this should be popping all
1009 targets to CUR, and not just those at this stratum level. */
1010 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1011 {
1012 /* There's already something at this stratum level. Close it,
1013 and un-hook it from the stack. */
1014 struct target_ops *tmp = (*cur);
1015
1016 (*cur) = (*cur)->beneath;
1017 tmp->beneath = NULL;
1018 target_close (tmp, 0);
1019 }
1020
1021 /* We have removed all targets in our stratum, now add the new one. */
1022 t->beneath = (*cur);
1023 (*cur) = t;
1024
1025 update_current_target ();
1026 }
1027
1028 /* Remove a target_ops vector from the stack, wherever it may be.
1029 Return how many times it was removed (0 or 1). */
1030
1031 int
1032 unpush_target (struct target_ops *t)
1033 {
1034 struct target_ops **cur;
1035 struct target_ops *tmp;
1036
1037 if (t->to_stratum == dummy_stratum)
1038 internal_error (__FILE__, __LINE__,
1039 _("Attempt to unpush the dummy target"));
1040
1041 /* Look for the specified target. Note that we assume that a target
1042 can only occur once in the target stack. */
1043
1044 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1045 {
1046 if ((*cur) == t)
1047 break;
1048 }
1049
1050 /* If we don't find target_ops, quit. Only open targets should be
1051 closed. */
1052 if ((*cur) == NULL)
1053 return 0;
1054
1055 /* Unchain the target. */
1056 tmp = (*cur);
1057 (*cur) = (*cur)->beneath;
1058 tmp->beneath = NULL;
1059
1060 update_current_target ();
1061
1062 /* Finally close the target. Note we do this after unchaining, so
1063 any target method calls from within the target_close
1064 implementation don't end up in T anymore. */
1065 target_close (t, 0);
1066
1067 return 1;
1068 }
1069
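/* Pop the topmost target off the stack: close it and unpush it; it is
   an internal error if it cannot be found on the stack.  */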
1070 void
1071 pop_target (void)
1072 {
1073 target_close (target_stack, 0); /* Let it clean up. */
1074 if (unpush_target (target_stack) == 1)
1075 return;
1076
1077 fprintf_unfiltered (gdb_stderr,
1078 "pop_target couldn't find target %s\n",
1079 current_target.to_shortname);
1080 internal_error (__FILE__, __LINE__,
1081 _("failed internal consistency check"));
1082 }
1083
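/* Pop and close every target whose stratum is above ABOVE_STRATUM.  */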
1084 void
1085 pop_all_targets_above (enum strata above_stratum, int quitting)
1086 {
1087 while ((int) (current_target.to_stratum) > (int) above_stratum)
1088 {
1089 target_close (target_stack, quitting);
1090 if (!unpush_target (target_stack))
1091 {
1092 fprintf_unfiltered (gdb_stderr,
1093 "pop_all_targets couldn't find target %s\n",
1094 target_stack->to_shortname);
1095 internal_error (__FILE__, __LINE__,
1096 _("failed internal consistency check"));
1097 break;
1098 }
1099 }
1100 }
1101
1102 void
1103 pop_all_targets (int quitting)
1104 {
1105 pop_all_targets_above (dummy_stratum, quitting);
1106 }
1107
1108 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1109
1110 int
1111 target_is_pushed (struct target_ops *t)
1112 {
1113 struct target_ops **cur;
1114
1115 /* Check magic number. If wrong, it probably means someone changed
1116 the struct definition, but not all the places that initialize one. */
1117 if (t->to_magic != OPS_MAGIC)
1118 {
1119 fprintf_unfiltered (gdb_stderr,
1120 "Magic number of %s target struct wrong\n",
1121 t->to_shortname);
1122 internal_error (__FILE__, __LINE__,
1123 _("failed internal consistency check"));
1124 }
1125
1126 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1127 if (*cur == t)
1128 return 1;
1129
1130 return 0;
1131 }
1132
1133 /* Using the objfile specified in OBJFILE, find the address for the
1134 current thread's thread-local storage with offset OFFSET. */
1135 CORE_ADDR
1136 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1137 {
1138 volatile CORE_ADDR addr = 0;
1139 struct target_ops *target;
1140
1141 for (target = current_target.beneath;
1142 target != NULL;
1143 target = target->beneath)
1144 {
1145 if (target->to_get_thread_local_address != NULL)
1146 break;
1147 }
1148
1149 if (target != NULL
1150 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1151 {
1152 ptid_t ptid = inferior_ptid;
1153 volatile struct gdb_exception ex;
1154
1155 TRY_CATCH (ex, RETURN_MASK_ALL)
1156 {
1157 CORE_ADDR lm_addr;
1158
1159 /* Fetch the load module address for this objfile. */
1160 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1161 objfile);
1162 /* If it's 0, throw the appropriate exception. */
1163 if (lm_addr == 0)
1164 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1165 _("TLS load module not found"));
1166
1167 addr = target->to_get_thread_local_address (target, ptid,
1168 lm_addr, offset);
1169 }
1170 /* If an error occurred, print TLS related messages here. Otherwise,
1171 throw the error to some higher catcher. */
1172 if (ex.reason < 0)
1173 {
1174 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1175
1176 switch (ex.error)
1177 {
1178 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1179 error (_("Cannot find thread-local variables "
1180 "in this thread library."));
1181 break;
1182 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1183 if (objfile_is_library)
1184 error (_("Cannot find shared library `%s' in dynamic"
1185 " linker's load module list"), objfile->name);
1186 else
1187 error (_("Cannot find executable file `%s' in dynamic"
1188 " linker's load module list"), objfile->name);
1189 break;
1190 case TLS_NOT_ALLOCATED_YET_ERROR:
1191 if (objfile_is_library)
1192 error (_("The inferior has not yet allocated storage for"
1193 " thread-local variables in\n"
1194 "the shared library `%s'\n"
1195 "for %s"),
1196 objfile->name, target_pid_to_str (ptid));
1197 else
1198 error (_("The inferior has not yet allocated storage for"
1199 " thread-local variables in\n"
1200 "the executable `%s'\n"
1201 "for %s"),
1202 objfile->name, target_pid_to_str (ptid));
1203 break;
1204 case TLS_GENERIC_ERROR:
1205 if (objfile_is_library)
1206 error (_("Cannot find thread-local storage for %s, "
1207 "shared library %s:\n%s"),
1208 target_pid_to_str (ptid),
1209 objfile->name, ex.message);
1210 else
1211 error (_("Cannot find thread-local storage for %s, "
1212 "executable file %s:\n%s"),
1213 target_pid_to_str (ptid),
1214 objfile->name, ex.message);
1215 break;
1216 default:
1217 throw_exception (ex);
1218 break;
1219 }
1220 }
1221 }
1222 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1223 TLS is an ABI-specific thing. But we don't do that yet. */
1224 else
1225 error (_("Cannot find thread-local variables on this target"));
1226
1227 return addr;
1228 }
1229
1230 #undef MIN
1231 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1232
1233 /* target_read_string -- read a null terminated string, up to LEN bytes,
1234 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1235 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1236 is responsible for freeing it. Return the number of bytes successfully
1237 read. */
1238
1239 int
1240 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1241 {
1242 int tlen, offset, i;
1243 gdb_byte buf[4];
1244 int errcode = 0;
1245 char *buffer;
1246 int buffer_allocated;
1247 char *bufptr;
1248 unsigned int nbytes_read = 0;
1249
1250 gdb_assert (string);
1251
1252 /* Small for testing. */
1253 buffer_allocated = 4;
1254 buffer = xmalloc (buffer_allocated);
1255 bufptr = buffer;
1256
1257 while (len > 0)
1258 {
1259 tlen = MIN (len, 4 - (memaddr & 3));
1260 offset = memaddr & 3;
1261
1262 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1263 if (errcode != 0)
1264 {
1265 /* The transfer request might have crossed the boundary to an
1266 unallocated region of memory. Retry the transfer, requesting
1267 a single byte. */
1268 tlen = 1;
1269 offset = 0;
1270 errcode = target_read_memory (memaddr, buf, 1);
1271 if (errcode != 0)
1272 goto done;
1273 }
1274
1275 if (bufptr - buffer + tlen > buffer_allocated)
1276 {
1277 unsigned int bytes;
1278
1279 bytes = bufptr - buffer;
1280 buffer_allocated *= 2;
1281 buffer = xrealloc (buffer, buffer_allocated);
1282 bufptr = buffer + bytes;
1283 }
1284
1285 for (i = 0; i < tlen; i++)
1286 {
1287 *bufptr++ = buf[i + offset];
1288 if (buf[i + offset] == '\000')
1289 {
1290 nbytes_read += i + 1;
1291 goto done;
1292 }
1293 }
1294
1295 memaddr += tlen;
1296 len -= tlen;
1297 nbytes_read += tlen;
1298 }
1299 done:
1300 *string = buffer;
1301 if (errnop != NULL)
1302 *errnop = errcode;
1303 return nbytes_read;
1304 }
1305
1306 struct target_section_table *
1307 target_get_section_table (struct target_ops *target)
1308 {
1309 struct target_ops *t;
1310
1311 if (targetdebug)
1312 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1313
1314 for (t = target; t != NULL; t = t->beneath)
1315 if (t->to_get_section_table != NULL)
1316 return (*t->to_get_section_table) (t);
1317
1318 return NULL;
1319 }
1320
1321 /* Find a section containing ADDR. */
1322
1323 struct target_section *
1324 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1325 {
1326 struct target_section_table *table = target_get_section_table (target);
1327 struct target_section *secp;
1328
1329 if (table == NULL)
1330 return NULL;
1331
1332 for (secp = table->sections; secp < table->sections_end; secp++)
1333 {
1334 if (addr >= secp->addr && addr < secp->endaddr)
1335 return secp;
1336 }
1337 return NULL;
1338 }
1339
1340 /* Read memory from the live target, even if currently inspecting a
1341 traceframe. The return is the same as that of target_read. */
1342
1343 static LONGEST
1344 target_read_live_memory (enum target_object object,
1345 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1346 {
1347 int ret;
1348 struct cleanup *cleanup;
1349
1350   /* Switch momentarily out of tfind mode so as to access live memory.
1351 Note that this must not clear global state, such as the frame
1352 cache, which must still remain valid for the previous traceframe.
1353 We may be _building_ the frame cache at this point. */
1354 cleanup = make_cleanup_restore_traceframe_number ();
1355 set_traceframe_number (-1);
1356
1357 ret = target_read (current_target.beneath, object, NULL,
1358 myaddr, memaddr, len);
1359
1360 do_cleanups (cleanup);
1361 return ret;
1362 }
1363
1364 /* Using the set of read-only target sections of OPS, read live
1365 read-only memory. Note that the actual reads start from the
1366 top-most target again.
1367
1368 For interface/parameters/return description see target.h,
1369 to_xfer_partial. */
1370
1371 static LONGEST
1372 memory_xfer_live_readonly_partial (struct target_ops *ops,
1373 enum target_object object,
1374 gdb_byte *readbuf, ULONGEST memaddr,
1375 LONGEST len)
1376 {
1377 struct target_section *secp;
1378 struct target_section_table *table;
1379
1380 secp = target_section_by_addr (ops, memaddr);
1381 if (secp != NULL
1382 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1383 & SEC_READONLY))
1384 {
1385 struct target_section *p;
1386 ULONGEST memend = memaddr + len;
1387
1388 table = target_get_section_table (ops);
1389
1390 for (p = table->sections; p < table->sections_end; p++)
1391 {
1392 if (memaddr >= p->addr)
1393 {
1394 if (memend <= p->endaddr)
1395 {
1396 /* Entire transfer is within this section. */
1397 return target_read_live_memory (object, memaddr,
1398 readbuf, len);
1399 }
1400 else if (memaddr >= p->endaddr)
1401 {
1402 /* This section ends before the transfer starts. */
1403 continue;
1404 }
1405 else
1406 {
1407 /* This section overlaps the transfer. Just do half. */
1408 len = p->endaddr - memaddr;
1409 return target_read_live_memory (object, memaddr,
1410 readbuf, len);
1411 }
1412 }
1413 }
1414 }
1415
1416 return 0;
1417 }
1418
1419 /* Perform a partial memory transfer.
1420 For docs see target.h, to_xfer_partial. */
1421
1422 static LONGEST
1423 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1424 void *readbuf, const void *writebuf, ULONGEST memaddr,
1425 LONGEST len)
1426 {
1427 LONGEST res;
1428 int reg_len;
1429 struct mem_region *region;
1430 struct inferior *inf;
1431
1432 /* For accesses to unmapped overlay sections, read directly from
1433 files. Must do this first, as MEMADDR may need adjustment. */
1434 if (readbuf != NULL && overlay_debugging)
1435 {
1436 struct obj_section *section = find_pc_overlay (memaddr);
1437
1438 if (pc_in_unmapped_range (memaddr, section))
1439 {
1440 struct target_section_table *table
1441 = target_get_section_table (ops);
1442 const char *section_name = section->the_bfd_section->name;
1443
1444 memaddr = overlay_mapped_address (memaddr, section);
1445 return section_table_xfer_memory_partial (readbuf, writebuf,
1446 memaddr, len,
1447 table->sections,
1448 table->sections_end,
1449 section_name);
1450 }
1451 }
1452
1453 /* Try the executable files, if "trust-readonly-sections" is set. */
1454 if (readbuf != NULL && trust_readonly)
1455 {
1456 struct target_section *secp;
1457 struct target_section_table *table;
1458
1459 secp = target_section_by_addr (ops, memaddr);
1460 if (secp != NULL
1461 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1462 & SEC_READONLY))
1463 {
1464 table = target_get_section_table (ops);
1465 return section_table_xfer_memory_partial (readbuf, writebuf,
1466 memaddr, len,
1467 table->sections,
1468 table->sections_end,
1469 NULL);
1470 }
1471 }
1472
1473 /* If reading unavailable memory in the context of traceframes, and
1474      this address falls within a read-only section, fall back to
1475 reading from live memory. */
1476 if (readbuf != NULL && get_traceframe_number () != -1)
1477 {
1478 VEC(mem_range_s) *available;
1479
1480 /* If we fail to get the set of available memory, then the
1481 target does not support querying traceframe info, and so we
1482 attempt reading from the traceframe anyway (assuming the
1483 target implements the old QTro packet then). */
1484 if (traceframe_available_memory (&available, memaddr, len))
1485 {
1486 struct cleanup *old_chain;
1487
1488 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1489
1490 if (VEC_empty (mem_range_s, available)
1491 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1492 {
1493 /* Don't read into the traceframe's available
1494 memory. */
1495 if (!VEC_empty (mem_range_s, available))
1496 {
1497 LONGEST oldlen = len;
1498
1499 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1500 gdb_assert (len <= oldlen);
1501 }
1502
1503 do_cleanups (old_chain);
1504
1505 /* This goes through the topmost target again. */
1506 res = memory_xfer_live_readonly_partial (ops, object,
1507 readbuf, memaddr, len);
1508 if (res > 0)
1509 return res;
1510
1511 /* No use trying further, we know some memory starting
1512 at MEMADDR isn't available. */
1513 return -1;
1514 }
1515
1516 	  /* Don't try to read more than is available, in
1517 case the target implements the deprecated QTro packet to
1518 cater for older GDBs (the target's knowledge of read-only
1519 sections may be outdated by now). */
1520 len = VEC_index (mem_range_s, available, 0)->length;
1521
1522 do_cleanups (old_chain);
1523 }
1524 }
1525
1526 /* Try GDB's internal data cache. */
1527 region = lookup_mem_region (memaddr);
1528 /* region->hi == 0 means there's no upper bound. */
1529 if (memaddr + len < region->hi || region->hi == 0)
1530 reg_len = len;
1531 else
1532 reg_len = region->hi - memaddr;
1533
1534 switch (region->attrib.mode)
1535 {
1536 case MEM_RO:
1537 if (writebuf != NULL)
1538 return -1;
1539 break;
1540
1541 case MEM_WO:
1542 if (readbuf != NULL)
1543 return -1;
1544 break;
1545
1546 case MEM_FLASH:
1547 /* We only support writing to flash during "load" for now. */
1548 if (writebuf != NULL)
1549 error (_("Writing to flash memory forbidden in this context"));
1550 break;
1551
1552 case MEM_NONE:
1553 return -1;
1554 }
1555
1556 if (!ptid_equal (inferior_ptid, null_ptid))
1557 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1558 else
1559 inf = NULL;
1560
1561 if (inf != NULL
1562 /* The dcache reads whole cache lines; that doesn't play well
1563 with reading from a trace buffer, because reading outside of
1564 the collected memory range fails. */
1565 && get_traceframe_number () == -1
1566 && (region->attrib.cache
1567 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1568 {
1569 if (readbuf != NULL)
1570 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1571 reg_len, 0);
1572 else
1573 /* FIXME drow/2006-08-09: If we're going to preserve const
1574 correctness dcache_xfer_memory should take readbuf and
1575 writebuf. */
1576 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1577 (void *) writebuf,
1578 reg_len, 1);
1579 if (res <= 0)
1580 return -1;
1581 else
1582 return res;
1583 }
1584
1585 /* If none of those methods found the memory we wanted, fall back
1586 to a target partial transfer. Normally a single call to
1587 to_xfer_partial is enough; if it doesn't recognize an object
1588 it will call the to_xfer_partial of the next target down.
1589 But for memory this won't do. Memory is the only target
1590 object which can be read from more than one valid target.
1591 A core file, for instance, could have some of memory but
1592 delegate other bits to the target below it. So, we must
1593 manually try all targets. */
1594
1595 do
1596 {
1597 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1598 readbuf, writebuf, memaddr, reg_len);
1599 if (res > 0)
1600 break;
1601
1602 /* We want to continue past core files to executables, but not
1603 past a running target's memory. */
1604 if (ops->to_has_all_memory (ops))
1605 break;
1606
1607 ops = ops->beneath;
1608 }
1609 while (ops != NULL);
1610
1611   /* Make sure the cache gets updated no matter what, if we are writing
1612      to the stack: even if this write is not tagged as a stack access, we
1613      still need to update the cache.  */
1614
1615 if (res > 0
1616 && inf != NULL
1617 && writebuf != NULL
1618 && !region->attrib.cache
1619 && stack_cache_enabled_p
1620 && object != TARGET_OBJECT_STACK_MEMORY)
1621 {
1622 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1623 }
1624
1625 /* If we still haven't got anything, return the last error. We
1626 give up. */
1627 return res;
1628 }
1629
1630 /* Perform a partial memory transfer. For docs see target.h,
1631 to_xfer_partial. */
1632
1633 static LONGEST
1634 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1635 void *readbuf, const void *writebuf, ULONGEST memaddr,
1636 LONGEST len)
1637 {
1638 int res;
1639
1640 /* Zero length requests are ok and require no work. */
1641 if (len == 0)
1642 return 0;
1643
1644 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1645 breakpoint insns, thus hiding out from higher layers whether
1646 there are software breakpoints inserted in the code stream. */
1647 if (readbuf != NULL)
1648 {
1649 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1650
1651 if (res > 0 && !show_memory_breakpoints)
1652 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1653 }
1654 else
1655 {
1656 void *buf;
1657 struct cleanup *old_chain;
1658
1659 buf = xmalloc (len);
1660 old_chain = make_cleanup (xfree, buf);
1661 memcpy (buf, writebuf, len);
1662
1663 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1664 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1665
1666 do_cleanups (old_chain);
1667 }
1668
1669 return res;
1670 }
1671
1672 static void
1673 restore_show_memory_breakpoints (void *arg)
1674 {
1675 show_memory_breakpoints = (uintptr_t) arg;
1676 }
1677
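/* Temporarily set show_memory_breakpoints to SHOW; return a cleanup
   that restores the previous value.  Illustrative use (hypothetical
   caller):

     struct cleanup *old = make_show_memory_breakpoints_cleanup (1);

     ... memory reads now return the breakpoint instructions actually
     inserted, instead of the shadowed original contents ...

     do_cleanups (old);  */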
1678 struct cleanup *
1679 make_show_memory_breakpoints_cleanup (int show)
1680 {
1681 int current = show_memory_breakpoints;
1682
1683 show_memory_breakpoints = show;
1684 return make_cleanup (restore_show_memory_breakpoints,
1685 (void *) (uintptr_t) current);
1686 }
1687
1688 /* For docs see target.h, to_xfer_partial. */
1689
1690 static LONGEST
1691 target_xfer_partial (struct target_ops *ops,
1692 enum target_object object, const char *annex,
1693 void *readbuf, const void *writebuf,
1694 ULONGEST offset, LONGEST len)
1695 {
1696 LONGEST retval;
1697
1698 gdb_assert (ops->to_xfer_partial != NULL);
1699
1700 if (writebuf && !may_write_memory)
1701 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1702 core_addr_to_string_nz (offset), plongest (len));
1703
1704 /* If this is a memory transfer, let the memory-specific code
1705 have a look at it instead. Memory transfers are more
1706 complicated. */
1707 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1708 retval = memory_xfer_partial (ops, object, readbuf,
1709 writebuf, offset, len);
1710 else
1711 {
1712 enum target_object raw_object = object;
1713
1714 /* If this is a raw memory transfer, request the normal
1715 memory object from other layers. */
1716 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1717 raw_object = TARGET_OBJECT_MEMORY;
1718
1719 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1720 writebuf, offset, len);
1721 }
1722
1723 if (targetdebug)
1724 {
1725 const unsigned char *myaddr = NULL;
1726
1727 fprintf_unfiltered (gdb_stdlog,
1728 "%s:target_xfer_partial "
1729 "(%d, %s, %s, %s, %s, %s) = %s",
1730 ops->to_shortname,
1731 (int) object,
1732 (annex ? annex : "(null)"),
1733 host_address_to_string (readbuf),
1734 host_address_to_string (writebuf),
1735 core_addr_to_string_nz (offset),
1736 plongest (len), plongest (retval));
1737
1738 if (readbuf)
1739 myaddr = readbuf;
1740 if (writebuf)
1741 myaddr = writebuf;
1742 if (retval > 0 && myaddr != NULL)
1743 {
1744 int i;
1745
1746 fputs_unfiltered (", bytes =", gdb_stdlog);
1747 for (i = 0; i < retval; i++)
1748 {
1749 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1750 {
1751 if (targetdebug < 2 && i > 0)
1752 {
1753 fprintf_unfiltered (gdb_stdlog, " ...");
1754 break;
1755 }
1756 fprintf_unfiltered (gdb_stdlog, "\n");
1757 }
1758
1759 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1760 }
1761 }
1762
1763 fputc_unfiltered ('\n', gdb_stdlog);
1764 }
1765 return retval;
1766 }
1767
1768 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1769 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1770 if any error occurs.
1771
1772 If an error occurs, no guarantee is made about the contents of the data at
1773 MYADDR. In particular, the caller should not depend upon partial reads
1774 filling the buffer with good data. There is no way for the caller to know
1775    how much good data might have been transferred anyway.  Callers that can
1776 deal with partial reads should call target_read (which will retry until
1777 it makes no progress, and then return how much was transferred). */
1778
1779 int
1780 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1781 {
1782 /* Dispatch to the topmost target, not the flattened current_target.
1783 Memory accesses check target->to_has_(all_)memory, and the
1784 flattened target doesn't inherit those. */
1785 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1786 myaddr, memaddr, len) == len)
1787 return 0;
1788 else
1789 return EIO;
1790 }
1791
1792 /* Like target_read_memory, but specify explicitly that this is a read from
1793 the target's stack. This may trigger different cache behavior. */
1794
1795 int
1796 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1797 {
1798 /* Dispatch to the topmost target, not the flattened current_target.
1799 Memory accesses check target->to_has_(all_)memory, and the
1800 flattened target doesn't inherit those. */
1801
1802 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1803 myaddr, memaddr, len) == len)
1804 return 0;
1805 else
1806 return EIO;
1807 }
1808
1809 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1810 Returns either 0 for success or an errno value if any error occurs.
1811 If an error occurs, no guarantee is made about how much data got written.
1812 Callers that can deal with partial writes should call target_write. */
1813
1814 int
1815 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1816 {
1817 /* Dispatch to the topmost target, not the flattened current_target.
1818 Memory accesses check target->to_has_(all_)memory, and the
1819 flattened target doesn't inherit those. */
1820 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1821 myaddr, memaddr, len) == len)
1822 return 0;
1823 else
1824 return EIO;
1825 }
1826
1827 /* Write LEN bytes from MYADDR to target raw memory at address
1828 MEMADDR. Returns either 0 for success or an errno value if any
1829 error occurs. If an error occurs, no guarantee is made about how
1830 much data got written. Callers that can deal with partial writes
1831 should call target_write. */
1832
1833 int
1834 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1835 {
1836 /* Dispatch to the topmost target, not the flattened current_target.
1837 Memory accesses check target->to_has_(all_)memory, and the
1838 flattened target doesn't inherit those. */
1839 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1840 myaddr, memaddr, len) == len)
1841 return 0;
1842 else
1843 return EIO;
1844 }
1845
1846 /* Fetch the target's memory map. */
1847
1848 VEC(mem_region_s) *
1849 target_memory_map (void)
1850 {
1851 VEC(mem_region_s) *result;
1852 struct mem_region *last_one, *this_one;
1853 int ix;
1854 struct target_ops *t;
1855
1856 if (targetdebug)
1857 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1858
1859 for (t = current_target.beneath; t != NULL; t = t->beneath)
1860 if (t->to_memory_map != NULL)
1861 break;
1862
1863 if (t == NULL)
1864 return NULL;
1865
1866 result = t->to_memory_map (t);
1867 if (result == NULL)
1868 return NULL;
1869
1870 qsort (VEC_address (mem_region_s, result),
1871 VEC_length (mem_region_s, result),
1872 sizeof (struct mem_region), mem_region_cmp);
1873
1874 /* Check that regions do not overlap. Simultaneously assign
1875 a numbering for the "mem" commands to use to refer to
1876 each region. */
1877 last_one = NULL;
1878 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1879 {
1880 this_one->number = ix;
1881
1882 if (last_one && last_one->hi > this_one->lo)
1883 {
1884 warning (_("Overlapping regions in memory map: ignoring"));
1885 VEC_free (mem_region_s, result);
1886 return NULL;
1887 }
1888 last_one = this_one;
1889 }
1890
1891 return result;
1892 }
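
/* Illustrative sketch (not part of the original source): how the
   vector returned by target_memory_map might be walked and released.
   The function name is hypothetical; disabled with "#if 0" so it has
   no effect on the build.  */
#if 0
static void
example_dump_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  /* The regions come back sorted and numbered; see above.  */
  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    fprintf_unfiltered (gdb_stdlog, "region %d: %s..%s\n",
			r->number, hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}
#endif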
1893
1894 void
1895 target_flash_erase (ULONGEST address, LONGEST length)
1896 {
1897 struct target_ops *t;
1898
1899 for (t = current_target.beneath; t != NULL; t = t->beneath)
1900 if (t->to_flash_erase != NULL)
1901 {
1902 if (targetdebug)
1903 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1904 hex_string (address), phex (length, 0));
1905 t->to_flash_erase (t, address, length);
1906 return;
1907 }
1908
1909 tcomplain ();
1910 }
1911
1912 void
1913 target_flash_done (void)
1914 {
1915 struct target_ops *t;
1916
1917 for (t = current_target.beneath; t != NULL; t = t->beneath)
1918 if (t->to_flash_done != NULL)
1919 {
1920 if (targetdebug)
1921 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1922 t->to_flash_done (t);
1923 return;
1924 }
1925
1926 tcomplain ();
1927 }
1928
1929 static void
1930 show_trust_readonly (struct ui_file *file, int from_tty,
1931 struct cmd_list_element *c, const char *value)
1932 {
1933 fprintf_filtered (file,
1934 _("Mode for reading from readonly sections is %s.\n"),
1935 value);
1936 }
1937
1938 /* More generic transfers. */
1939
1940 static LONGEST
1941 default_xfer_partial (struct target_ops *ops, enum target_object object,
1942 const char *annex, gdb_byte *readbuf,
1943 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1944 {
1945 if (object == TARGET_OBJECT_MEMORY
1946 && ops->deprecated_xfer_memory != NULL)
1947 /* If available, fall back to the target's
1948 "deprecated_xfer_memory" method. */
1949 {
1950 int xfered = -1;
1951
1952 errno = 0;
1953 if (writebuf != NULL)
1954 {
1955 void *buffer = xmalloc (len);
1956 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1957
1958 memcpy (buffer, writebuf, len);
1959 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1960 1/*write*/, NULL, ops);
1961 do_cleanups (cleanup);
1962 }
1963 if (readbuf != NULL)
1964 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1965 0/*read*/, NULL, ops);
1966 if (xfered > 0)
1967 return xfered;
1968 else if (xfered == 0 && errno == 0)
1969 /* "deprecated_xfer_memory" uses 0, cross checked against
1970 ERRNO as one indication of an error. */
1971 return 0;
1972 else
1973 return -1;
1974 }
1975 else if (ops->beneath != NULL)
1976 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1977 readbuf, writebuf, offset, len);
1978 else
1979 return -1;
1980 }
1981
1982 /* The xfer_partial handler for the topmost target. Unlike the default,
1983 it does not need to handle memory specially; it just passes all
1984 requests down the stack. */
1985
1986 static LONGEST
1987 current_xfer_partial (struct target_ops *ops, enum target_object object,
1988 const char *annex, gdb_byte *readbuf,
1989 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1990 {
1991 if (ops->beneath != NULL)
1992 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1993 readbuf, writebuf, offset, len);
1994 else
1995 return -1;
1996 }
1997
1998 /* Target vector read/write partial wrapper functions. */
1999
2000 static LONGEST
2001 target_read_partial (struct target_ops *ops,
2002 enum target_object object,
2003 const char *annex, gdb_byte *buf,
2004 ULONGEST offset, LONGEST len)
2005 {
2006 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2007 }
2008
2009 static LONGEST
2010 target_write_partial (struct target_ops *ops,
2011 enum target_object object,
2012 const char *annex, const gdb_byte *buf,
2013 ULONGEST offset, LONGEST len)
2014 {
2015 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2016 }
2017
2018 /* Wrappers to perform the full transfer. */
2019
2020 /* For docs on target_read see target.h. */
2021
2022 LONGEST
2023 target_read (struct target_ops *ops,
2024 enum target_object object,
2025 const char *annex, gdb_byte *buf,
2026 ULONGEST offset, LONGEST len)
2027 {
2028 LONGEST xfered = 0;
2029
2030 while (xfered < len)
2031 {
2032 LONGEST xfer = target_read_partial (ops, object, annex,
2033 (gdb_byte *) buf + xfered,
2034 offset + xfered, len - xfered);
2035
2036 /* Call an observer, notifying them of the xfer progress? */
2037 if (xfer == 0)
2038 return xfered;
2039 if (xfer < 0)
2040 return -1;
2041 xfered += xfer;
2042 QUIT;
2043 }
2044 return len;
2045 }
2046
2047 /* Assuming that the entire [begin, end) range of memory cannot be
2048 read, try to read whatever subrange is possible to read.
2049
2050 The function returns, in RESULT, either zero or one memory block.
2051 If there's a readable subrange at the beginning, it is completely
2052 read and returned. Any further readable subrange will not be read.
2053 Otherwise, if there's a readable subrange at the end, it will be
2054 completely read and returned. Any readable subranges before it
2055 (obviously, not starting at the beginning) will be ignored. In
2056 other cases -- either no readable subrange, or readable
2057 subrange(s) neither at the beginning nor at the end -- nothing is returned.
2058
2059 The purpose of this function is to handle a read across a boundary
2060 of accessible memory in the case when a memory map is not available.
2061 The above restrictions are fine for this case, but will give
2062 incorrect results if the memory is 'patchy'. However, supporting
2063 'patchy' memory would require trying to read every single byte,
2064 and that seems an unacceptable solution. An explicit memory map is
2065 recommended for this case -- read_memory_robust will then take
2066 care of reading multiple ranges. */
2067
2068 static void
2069 read_whatever_is_readable (struct target_ops *ops,
2070 ULONGEST begin, ULONGEST end,
2071 VEC(memory_read_result_s) **result)
2072 {
2073 gdb_byte *buf = xmalloc (end - begin);
2074 ULONGEST current_begin = begin;
2075 ULONGEST current_end = end;
2076 int forward;
2077 memory_read_result_s r;
2078
2079 /* If we previously failed to read 1 byte, nothing can be done here. */
2080 if (end - begin <= 1)
2081 {
2082 xfree (buf);
2083 return;
2084 }
2085
2086 /* Check that either the first or the last byte is readable, and give
2087 up if not. This heuristic is meant to permit reading accessible
2088 memory at the boundary of an accessible region. */
2089 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2090 buf, begin, 1) == 1)
2091 {
2092 forward = 1;
2093 ++current_begin;
2094 }
2095 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2096 buf + (end-begin) - 1, end - 1, 1) == 1)
2097 {
2098 forward = 0;
2099 --current_end;
2100 }
2101 else
2102 {
2103 xfree (buf);
2104 return;
2105 }
2106
2107 /* The loop invariant is that the range [current_begin, current_end)
2108 was previously found to be not readable as a whole.
2109
2110 Note the loop condition -- if the range has only 1 byte, we cannot
2111 divide it further, so there is no point in trying. */
2112 while (current_end - current_begin > 1)
2113 {
2114 ULONGEST first_half_begin, first_half_end;
2115 ULONGEST second_half_begin, second_half_end;
2116 LONGEST xfer;
2117 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2118
2119 if (forward)
2120 {
2121 first_half_begin = current_begin;
2122 first_half_end = middle;
2123 second_half_begin = middle;
2124 second_half_end = current_end;
2125 }
2126 else
2127 {
2128 first_half_begin = middle;
2129 first_half_end = current_end;
2130 second_half_begin = current_begin;
2131 second_half_end = middle;
2132 }
2133
2134 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2135 buf + (first_half_begin - begin),
2136 first_half_begin,
2137 first_half_end - first_half_begin);
2138
2139 if (xfer == first_half_end - first_half_begin)
2140 {
2141 /* This half reads fine, so the error must be in the
2142 other half. */
2143 current_begin = second_half_begin;
2144 current_end = second_half_end;
2145 }
2146 else
2147 {
2148 /* This half is not readable. Because we've tried one byte, we
2149 know some part of this half is actually readable. Go to the next
2150 iteration to divide again and try to read.
2151
2152 We don't handle the other half, because this function only tries
2153 to read a single readable subrange. */
2154 current_begin = first_half_begin;
2155 current_end = first_half_end;
2156 }
2157 }
2158
2159 if (forward)
2160 {
2161 /* The [begin, current_begin) range has been read. */
2162 r.begin = begin;
2163 r.end = current_begin;
2164 r.data = buf;
2165 }
2166 else
2167 {
2168 /* The [current_end, end) range has been read. */
2169 LONGEST rlen = end - current_end;
2170
2171 r.data = xmalloc (rlen);
2172 memcpy (r.data, buf + current_end - begin, rlen);
2173 r.begin = current_end;
2174 r.end = end;
2175 xfree (buf);
2176 }
2177 VEC_safe_push (memory_read_result_s, *result, &r);
2178 }
2179
2180 void
2181 free_memory_read_result_vector (void *x)
2182 {
2183 VEC(memory_read_result_s) *v = x;
2184 memory_read_result_s *current;
2185 int ix;
2186
2187 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2188 {
2189 xfree (current->data);
2190 }
2191 VEC_free (memory_read_result_s, v);
2192 }
2193
2194 VEC(memory_read_result_s) *
2195 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2196 {
2197 VEC(memory_read_result_s) *result = 0;
2198
2199 LONGEST xfered = 0;
2200 while (xfered < len)
2201 {
2202 struct mem_region *region = lookup_mem_region (offset + xfered);
2203 LONGEST rlen;
2204
2205 /* If there is no explicit region, a fake one should be created. */
2206 gdb_assert (region);
2207
2208 if (region->hi == 0)
2209 rlen = len - xfered;
2210 else
2211 rlen = region->hi - offset;
2212
2213 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2214 {
2215 /* Cannot read this region. Note that we can end up here only
2216 if the region is explicitly marked inaccessible, or
2217 'inaccessible-by-default' is in effect. */
2218 xfered += rlen;
2219 }
2220 else
2221 {
2222 LONGEST to_read = min (len - xfered, rlen);
2223 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2224
2225 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2226 (gdb_byte *) buffer,
2227 offset + xfered, to_read);
2228 /* Call an observer, notifying them of the xfer progress? */
2229 if (xfer <= 0)
2230 {
2231 /* Got an error reading the full chunk. See if we can read
2232 some subrange instead. */
2233 xfree (buffer);
2234 read_whatever_is_readable (ops, offset + xfered,
2235 offset + xfered + to_read, &result);
2236 xfered += to_read;
2237 }
2238 else
2239 {
2240 struct memory_read_result r;
2241 r.data = buffer;
2242 r.begin = offset + xfered;
2243 r.end = r.begin + xfer;
2244 VEC_safe_push (memory_read_result_s, result, &r);
2245 xfered += xfer;
2246 }
2247 QUIT;
2248 }
2249 }
2250 return result;
2251 }
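
/* Illustrative sketch (not part of the original source): how a caller
   might consume the chunk vector produced by read_memory_robust and
   release it with free_memory_read_result_vector.  The function name
   is hypothetical; disabled with "#if 0" so it has no effect on the
   build.  */
#if 0
static void
example_robust_read (ULONGEST addr, LONGEST len)
{
  VEC(memory_read_result_s) *chunks;
  struct cleanup *back_to;
  memory_read_result_s *chunk;
  int ix;

  chunks = read_memory_robust (current_target.beneath, addr, len);
  back_to = make_cleanup (free_memory_read_result_vector, chunks);

  /* Each element describes one readable [begin, end) range and owns
     its data buffer.  */
  for (ix = 0; VEC_iterate (memory_read_result_s, chunks, ix, chunk); ix++)
    fprintf_unfiltered (gdb_stdlog, "readable chunk: %s..%s\n",
			hex_string (chunk->begin), hex_string (chunk->end));

  do_cleanups (back_to);
}
#endif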
2252
2253
2254 /* An alternative to target_write with progress callbacks. */
2255
2256 LONGEST
2257 target_write_with_progress (struct target_ops *ops,
2258 enum target_object object,
2259 const char *annex, const gdb_byte *buf,
2260 ULONGEST offset, LONGEST len,
2261 void (*progress) (ULONGEST, void *), void *baton)
2262 {
2263 LONGEST xfered = 0;
2264
2265 /* Give the progress callback a chance to set up. */
2266 if (progress)
2267 (*progress) (0, baton);
2268
2269 while (xfered < len)
2270 {
2271 LONGEST xfer = target_write_partial (ops, object, annex,
2272 (gdb_byte *) buf + xfered,
2273 offset + xfered, len - xfered);
2274
2275 if (xfer == 0)
2276 return xfered;
2277 if (xfer < 0)
2278 return -1;
2279
2280 if (progress)
2281 (*progress) (xfer, baton);
2282
2283 xfered += xfer;
2284 QUIT;
2285 }
2286 return len;
2287 }
2288
2289 /* For docs on target_write see target.h. */
2290
2291 LONGEST
2292 target_write (struct target_ops *ops,
2293 enum target_object object,
2294 const char *annex, const gdb_byte *buf,
2295 ULONGEST offset, LONGEST len)
2296 {
2297 return target_write_with_progress (ops, object, annex, buf, offset, len,
2298 NULL, NULL);
2299 }
2300
2301 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2302 the size of the transferred data. PADDING additional bytes are
2303 available in *BUF_P. This is a helper function for
2304 target_read_alloc; see the declaration of that function for more
2305 information. */
2306
2307 static LONGEST
2308 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2309 const char *annex, gdb_byte **buf_p, int padding)
2310 {
2311 size_t buf_alloc, buf_pos;
2312 gdb_byte *buf;
2313 LONGEST n;
2314
2315 /* This function does not have a length parameter; it reads the
2316 entire OBJECT. Also, it doesn't support objects fetched partly
2317 from one target and partly from another (in a different stratum,
2318 e.g. a core file and an executable). Both reasons make it
2319 unsuitable for reading memory. */
2320 gdb_assert (object != TARGET_OBJECT_MEMORY);
2321
2322 /* Start by reading up to 4K at a time. The target will throttle
2323 this number down if necessary. */
2324 buf_alloc = 4096;
2325 buf = xmalloc (buf_alloc);
2326 buf_pos = 0;
2327 while (1)
2328 {
2329 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2330 buf_pos, buf_alloc - buf_pos - padding);
2331 if (n < 0)
2332 {
2333 /* An error occurred. */
2334 xfree (buf);
2335 return -1;
2336 }
2337 else if (n == 0)
2338 {
2339 /* Read all there was. */
2340 if (buf_pos == 0)
2341 xfree (buf);
2342 else
2343 *buf_p = buf;
2344 return buf_pos;
2345 }
2346
2347 buf_pos += n;
2348
2349 /* If the buffer is filling up, expand it. */
2350 if (buf_alloc < buf_pos * 2)
2351 {
2352 buf_alloc *= 2;
2353 buf = xrealloc (buf, buf_alloc);
2354 }
2355
2356 QUIT;
2357 }
2358 }
2359
2360 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2361 the size of the transferred data. See the declaration of this
2362 function in "target.h" for more information about the return value. */
2363
2364 LONGEST
2365 target_read_alloc (struct target_ops *ops, enum target_object object,
2366 const char *annex, gdb_byte **buf_p)
2367 {
2368 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2369 }
2370
2371 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2372 returned as a string, allocated using xmalloc. If an error occurs
2373 or the transfer is unsupported, NULL is returned. Empty objects
2374 are returned as allocated but empty strings. A warning is issued
2375 if the result contains any embedded NUL bytes. */
2376
2377 char *
2378 target_read_stralloc (struct target_ops *ops, enum target_object object,
2379 const char *annex)
2380 {
2381 char *buffer;
2382 LONGEST i, transferred;
2383
2384 transferred = target_read_alloc_1 (ops, object, annex,
2385 (gdb_byte **) &buffer, 1);
2386
2387 if (transferred < 0)
2388 return NULL;
2389
2390 if (transferred == 0)
2391 return xstrdup ("");
2392
2393 buffer[transferred] = 0;
2394
2395 /* Check for embedded NUL bytes; but allow trailing NULs. */
2396 for (i = strlen (buffer); i < transferred; i++)
2397 if (buffer[i] != 0)
2398 {
2399 warning (_("target object %d, annex %s, "
2400 "contained unexpected null characters"),
2401 (int) object, annex ? annex : "(none)");
2402 break;
2403 }
2404
2405 return buffer;
2406 }
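
/* Illustrative sketch (not part of the original source): reading a
   whole target object as a NUL-terminated string.  The "processes"
   OS-data annex is only an example and may not be supported by every
   target; the function name is hypothetical, and the block is
   disabled with "#if 0".  */
#if 0
static void
example_read_osdata_string (void)
{
  char *text = target_read_stralloc (current_target.beneath,
				     TARGET_OBJECT_OSDATA, "processes");

  if (text != NULL)
    {
      fprintf_unfiltered (gdb_stdlog, "%s\n", text);
      xfree (text);
    }
}
#endif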
2407
2408 /* Memory transfer methods. */
2409
2410 void
2411 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2412 LONGEST len)
2413 {
2414 /* This method is used to read from an alternate, non-current
2415 target. This read must bypass the overlay support (as symbols
2416 don't match this target), and GDB's internal cache (wrong cache
2417 for this target). */
2418 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2419 != len)
2420 memory_error (EIO, addr);
2421 }
2422
2423 ULONGEST
2424 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2425 int len, enum bfd_endian byte_order)
2426 {
2427 gdb_byte buf[sizeof (ULONGEST)];
2428
2429 gdb_assert (len <= sizeof (buf));
2430 get_target_memory (ops, addr, buf, len);
2431 return extract_unsigned_integer (buf, len, byte_order);
2432 }
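
/* Illustrative sketch (not part of the original source): reading a
   pointer-sized unsigned value from an alternate target with
   get_target_memory_unsigned, which throws a memory_error on failure.
   The function name is hypothetical; disabled with "#if 0" so it has
   no effect on the build.  */
#if 0
static ULONGEST
example_read_pointer (struct target_ops *ops, CORE_ADDR addr)
{
  struct gdbarch *gdbarch = target_gdbarch ();
  int ptr_len = gdbarch_ptr_bit (gdbarch) / TARGET_CHAR_BIT;

  return get_target_memory_unsigned (ops, addr, ptr_len,
				     gdbarch_byte_order (gdbarch));
}
#endif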
2433
2434 int
2435 target_insert_breakpoint (struct gdbarch *gdbarch,
2436 struct bp_target_info *bp_tgt)
2437 {
2438 if (!may_insert_breakpoints)
2439 {
2440 warning (_("May not insert breakpoints"));
2441 return 1;
2442 }
2443
2444 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2445 }
2446
2447 int
2448 target_remove_breakpoint (struct gdbarch *gdbarch,
2449 struct bp_target_info *bp_tgt)
2450 {
2451 /* This is kind of a weird case to handle, but the permission might
2452 have been changed after breakpoints were inserted - in which case
2453 we should just take the user literally and assume that any
2454 breakpoints should be left in place. */
2455 if (!may_insert_breakpoints)
2456 {
2457 warning (_("May not remove breakpoints"));
2458 return 1;
2459 }
2460
2461 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2462 }
2463
2464 static void
2465 target_info (char *args, int from_tty)
2466 {
2467 struct target_ops *t;
2468 int has_all_mem = 0;
2469
2470 if (symfile_objfile != NULL)
2471 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2472
2473 for (t = target_stack; t != NULL; t = t->beneath)
2474 {
2475 if (!(*t->to_has_memory) (t))
2476 continue;
2477
2478 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2479 continue;
2480 if (has_all_mem)
2481 printf_unfiltered (_("\tWhile running this, "
2482 "GDB does not access memory from...\n"));
2483 printf_unfiltered ("%s:\n", t->to_longname);
2484 (t->to_files_info) (t);
2485 has_all_mem = (*t->to_has_all_memory) (t);
2486 }
2487 }
2488
2489 /* This function is called before any new inferior is created, e.g.
2490 by running a program, attaching, or connecting to a target.
2491 It cleans up any state from previous invocations which might
2492 change between runs. This is a subset of what target_preopen
2493 resets (things which might change between targets). */
2494
2495 void
2496 target_pre_inferior (int from_tty)
2497 {
2498 /* Clear out solib state. Otherwise the solib state of the previous
2499 inferior might have survived and is entirely wrong for the new
2500 target. This has been observed on GNU/Linux using glibc 2.3. How
2501 to reproduce:
2502
2503 bash$ ./foo&
2504 [1] 4711
2505 bash$ ./foo&
2506 [1] 4712
2507 bash$ gdb ./foo
2508 [...]
2509 (gdb) attach 4711
2510 (gdb) detach
2511 (gdb) attach 4712
2512 Cannot access memory at address 0xdeadbeef
2513 */
2514
2515 /* In some OSs, the shared library list is the same/global/shared
2516 across inferiors. If code is shared between processes, so are
2517 memory regions and features. */
2518 if (!gdbarch_has_global_solist (target_gdbarch ()))
2519 {
2520 no_shared_libraries (NULL, from_tty);
2521
2522 invalidate_target_mem_regions ();
2523
2524 target_clear_description ();
2525 }
2526
2527 agent_capability_invalidate ();
2528 }
2529
2530 /* Callback for iterate_over_inferiors. Gets rid of the given
2531 inferior. */
2532
2533 static int
2534 dispose_inferior (struct inferior *inf, void *args)
2535 {
2536 struct thread_info *thread;
2537
2538 thread = any_thread_of_process (inf->pid);
2539 if (thread)
2540 {
2541 switch_to_thread (thread->ptid);
2542
2543 /* Core inferiors actually should be detached, not killed. */
2544 if (target_has_execution)
2545 target_kill ();
2546 else
2547 target_detach (NULL, 0);
2548 }
2549
2550 return 0;
2551 }
2552
2553 /* This is to be called by the open routine before it does
2554 anything. */
2555
2556 void
2557 target_preopen (int from_tty)
2558 {
2559 dont_repeat ();
2560
2561 if (have_inferiors ())
2562 {
2563 if (!from_tty
2564 || !have_live_inferiors ()
2565 || query (_("A program is being debugged already. Kill it? ")))
2566 iterate_over_inferiors (dispose_inferior, NULL);
2567 else
2568 error (_("Program not killed."));
2569 }
2570
2571 /* Calling target_kill may remove the target from the stack. But if
2572 it doesn't (which seems like a win for UDI), remove it now. */
2573 /* Leave the exec target, though. The user may be switching from a
2574 live process to a core of the same program. */
2575 pop_all_targets_above (file_stratum, 0);
2576
2577 target_pre_inferior (from_tty);
2578 }
2579
2580 /* Detach a target after doing deferred register stores. */
2581
2582 void
2583 target_detach (char *args, int from_tty)
2584 {
2585 struct target_ops* t;
2586
2587 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2588 /* Don't remove global breakpoints here. They're removed on
2589 disconnection from the target. */
2590 ;
2591 else
2592 /* If we're in breakpoints-always-inserted mode, have to remove
2593 them before detaching. */
2594 remove_breakpoints_pid (PIDGET (inferior_ptid));
2595
2596 prepare_for_detach ();
2597
2598 for (t = current_target.beneath; t != NULL; t = t->beneath)
2599 {
2600 if (t->to_detach != NULL)
2601 {
2602 t->to_detach (t, args, from_tty);
2603 if (targetdebug)
2604 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2605 args, from_tty);
2606 return;
2607 }
2608 }
2609
2610 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2611 }
2612
2613 void
2614 target_disconnect (char *args, int from_tty)
2615 {
2616 struct target_ops *t;
2617
2618 /* If we're in breakpoints-always-inserted mode or if breakpoints
2619 are global across processes, we have to remove them before
2620 disconnecting. */
2621 remove_breakpoints ();
2622
2623 for (t = current_target.beneath; t != NULL; t = t->beneath)
2624 if (t->to_disconnect != NULL)
2625 {
2626 if (targetdebug)
2627 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2628 args, from_tty);
2629 t->to_disconnect (t, args, from_tty);
2630 return;
2631 }
2632
2633 tcomplain ();
2634 }
2635
2636 ptid_t
2637 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2638 {
2639 struct target_ops *t;
2640
2641 for (t = current_target.beneath; t != NULL; t = t->beneath)
2642 {
2643 if (t->to_wait != NULL)
2644 {
2645 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2646
2647 if (targetdebug)
2648 {
2649 char *status_string;
2650 char *options_string;
2651
2652 status_string = target_waitstatus_to_string (status);
2653 options_string = target_options_to_string (options);
2654 fprintf_unfiltered (gdb_stdlog,
2655 "target_wait (%d, status, options={%s})"
2656 " = %d, %s\n",
2657 PIDGET (ptid), options_string,
2658 PIDGET (retval), status_string);
2659 xfree (status_string);
2660 xfree (options_string);
2661 }
2662
2663 return retval;
2664 }
2665 }
2666
2667 noprocess ();
2668 }
2669
2670 char *
2671 target_pid_to_str (ptid_t ptid)
2672 {
2673 struct target_ops *t;
2674
2675 for (t = current_target.beneath; t != NULL; t = t->beneath)
2676 {
2677 if (t->to_pid_to_str != NULL)
2678 return (*t->to_pid_to_str) (t, ptid);
2679 }
2680
2681 return normal_pid_to_str (ptid);
2682 }
2683
2684 char *
2685 target_thread_name (struct thread_info *info)
2686 {
2687 struct target_ops *t;
2688
2689 for (t = current_target.beneath; t != NULL; t = t->beneath)
2690 {
2691 if (t->to_thread_name != NULL)
2692 return (*t->to_thread_name) (info);
2693 }
2694
2695 return NULL;
2696 }
2697
2698 void
2699 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2700 {
2701 struct target_ops *t;
2702
2703 target_dcache_invalidate ();
2704
2705 for (t = current_target.beneath; t != NULL; t = t->beneath)
2706 {
2707 if (t->to_resume != NULL)
2708 {
2709 t->to_resume (t, ptid, step, signal);
2710 if (targetdebug)
2711 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2712 PIDGET (ptid),
2713 step ? "step" : "continue",
2714 gdb_signal_to_name (signal));
2715
2716 registers_changed_ptid (ptid);
2717 set_executing (ptid, 1);
2718 set_running (ptid, 1);
2719 clear_inline_frame_state (ptid);
2720 return;
2721 }
2722 }
2723
2724 noprocess ();
2725 }
2726
2727 void
2728 target_pass_signals (int numsigs, unsigned char *pass_signals)
2729 {
2730 struct target_ops *t;
2731
2732 for (t = current_target.beneath; t != NULL; t = t->beneath)
2733 {
2734 if (t->to_pass_signals != NULL)
2735 {
2736 if (targetdebug)
2737 {
2738 int i;
2739
2740 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2741 numsigs);
2742
2743 for (i = 0; i < numsigs; i++)
2744 if (pass_signals[i])
2745 fprintf_unfiltered (gdb_stdlog, " %s",
2746 gdb_signal_to_name (i));
2747
2748 fprintf_unfiltered (gdb_stdlog, " })\n");
2749 }
2750
2751 (*t->to_pass_signals) (numsigs, pass_signals);
2752 return;
2753 }
2754 }
2755 }
2756
2757 void
2758 target_program_signals (int numsigs, unsigned char *program_signals)
2759 {
2760 struct target_ops *t;
2761
2762 for (t = current_target.beneath; t != NULL; t = t->beneath)
2763 {
2764 if (t->to_program_signals != NULL)
2765 {
2766 if (targetdebug)
2767 {
2768 int i;
2769
2770 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2771 numsigs);
2772
2773 for (i = 0; i < numsigs; i++)
2774 if (program_signals[i])
2775 fprintf_unfiltered (gdb_stdlog, " %s",
2776 gdb_signal_to_name (i));
2777
2778 fprintf_unfiltered (gdb_stdlog, " })\n");
2779 }
2780
2781 (*t->to_program_signals) (numsigs, program_signals);
2782 return;
2783 }
2784 }
2785 }
2786
2787 /* Look through the list of possible targets for a target that can
2788 follow forks. */
2789
2790 int
2791 target_follow_fork (int follow_child)
2792 {
2793 struct target_ops *t;
2794
2795 for (t = current_target.beneath; t != NULL; t = t->beneath)
2796 {
2797 if (t->to_follow_fork != NULL)
2798 {
2799 int retval = t->to_follow_fork (t, follow_child);
2800
2801 if (targetdebug)
2802 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2803 follow_child, retval);
2804 return retval;
2805 }
2806 }
2807
2808 /* Some target returned a fork event, but did not know how to follow it. */
2809 internal_error (__FILE__, __LINE__,
2810 _("could not find a target to follow fork"));
2811 }
2812
2813 void
2814 target_mourn_inferior (void)
2815 {
2816 struct target_ops *t;
2817
2818 for (t = current_target.beneath; t != NULL; t = t->beneath)
2819 {
2820 if (t->to_mourn_inferior != NULL)
2821 {
2822 t->to_mourn_inferior (t);
2823 if (targetdebug)
2824 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2825
2826 /* We no longer need to keep handles on any of the object files.
2827 Make sure to release them to avoid unnecessarily locking any
2828 of them while we're not actually debugging. */
2829 bfd_cache_close_all ();
2830
2831 return;
2832 }
2833 }
2834
2835 internal_error (__FILE__, __LINE__,
2836 _("could not find a target to follow mourn inferior"));
2837 }
2838
2839 /* Look for a target which can describe architectural features, starting
2840 from TARGET. If we find one, return its description. */
2841
2842 const struct target_desc *
2843 target_read_description (struct target_ops *target)
2844 {
2845 struct target_ops *t;
2846
2847 for (t = target; t != NULL; t = t->beneath)
2848 if (t->to_read_description != NULL)
2849 {
2850 const struct target_desc *tdesc;
2851
2852 tdesc = t->to_read_description (t);
2853 if (tdesc)
2854 return tdesc;
2855 }
2856
2857 return NULL;
2858 }
2859
2860 /* The default implementation of to_search_memory.
2861 This implements a basic search of memory, reading target memory and
2862 performing the search here (as opposed to performing the search on the
2863 target side with, for example, gdbserver). */
2864
2865 int
2866 simple_search_memory (struct target_ops *ops,
2867 CORE_ADDR start_addr, ULONGEST search_space_len,
2868 const gdb_byte *pattern, ULONGEST pattern_len,
2869 CORE_ADDR *found_addrp)
2870 {
2871 /* NOTE: also defined in find.c testcase. */
2872 #define SEARCH_CHUNK_SIZE 16000
2873 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2874 /* Buffer to hold memory contents for searching. */
2875 gdb_byte *search_buf;
2876 unsigned search_buf_size;
2877 struct cleanup *old_cleanups;
2878
2879 search_buf_size = chunk_size + pattern_len - 1;
2880
2881 /* No point in trying to allocate a buffer larger than the search space. */
2882 if (search_space_len < search_buf_size)
2883 search_buf_size = search_space_len;
2884
2885 search_buf = malloc (search_buf_size);
2886 if (search_buf == NULL)
2887 error (_("Unable to allocate memory to perform the search."));
2888 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2889
2890 /* Prime the search buffer. */
2891
2892 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2893 search_buf, start_addr, search_buf_size) != search_buf_size)
2894 {
2895 warning (_("Unable to access %s bytes of target "
2896 "memory at %s, halting search."),
2897 pulongest (search_buf_size), hex_string (start_addr));
2898 do_cleanups (old_cleanups);
2899 return -1;
2900 }
2901
2902 /* Perform the search.
2903
2904 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2905 When we've scanned N bytes we copy the trailing bytes to the start and
2906 read in another N bytes. */
2907
2908 while (search_space_len >= pattern_len)
2909 {
2910 gdb_byte *found_ptr;
2911 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2912
2913 found_ptr = memmem (search_buf, nr_search_bytes,
2914 pattern, pattern_len);
2915
2916 if (found_ptr != NULL)
2917 {
2918 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2919
2920 *found_addrp = found_addr;
2921 do_cleanups (old_cleanups);
2922 return 1;
2923 }
2924
2925 /* Not found in this chunk, skip to next chunk. */
2926
2927 /* Don't let search_space_len wrap here, it's unsigned. */
2928 if (search_space_len >= chunk_size)
2929 search_space_len -= chunk_size;
2930 else
2931 search_space_len = 0;
2932
2933 if (search_space_len >= pattern_len)
2934 {
2935 unsigned keep_len = search_buf_size - chunk_size;
2936 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2937 int nr_to_read;
2938
2939 /* Copy the trailing part of the previous iteration to the front
2940 of the buffer for the next iteration. */
2941 gdb_assert (keep_len == pattern_len - 1);
2942 memcpy (search_buf, search_buf + chunk_size, keep_len);
2943
2944 nr_to_read = min (search_space_len - keep_len, chunk_size);
2945
2946 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2947 search_buf + keep_len, read_addr,
2948 nr_to_read) != nr_to_read)
2949 {
2950 warning (_("Unable to access %s bytes of target "
2951 "memory at %s, halting search."),
2952 plongest (nr_to_read),
2953 hex_string (read_addr));
2954 do_cleanups (old_cleanups);
2955 return -1;
2956 }
2957
2958 start_addr += chunk_size;
2959 }
2960 }
2961
2962 /* Not found. */
2963
2964 do_cleanups (old_cleanups);
2965 return 0;
2966 }
2967
2968 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2969 sequence of bytes in PATTERN with length PATTERN_LEN.
2970
2971 The result is 1 if found, 0 if not found, and -1 if there was an error
2972 requiring halting of the search (e.g. memory read error).
2973 If the pattern is found the address is recorded in FOUND_ADDRP. */
2974
2975 int
2976 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2977 const gdb_byte *pattern, ULONGEST pattern_len,
2978 CORE_ADDR *found_addrp)
2979 {
2980 struct target_ops *t;
2981 int found;
2982
2983 /* We don't use INHERIT to set current_target.to_search_memory,
2984 so we have to scan the target stack and handle targetdebug
2985 ourselves. */
2986
2987 if (targetdebug)
2988 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2989 hex_string (start_addr));
2990
2991 for (t = current_target.beneath; t != NULL; t = t->beneath)
2992 if (t->to_search_memory != NULL)
2993 break;
2994
2995 if (t != NULL)
2996 {
2997 found = t->to_search_memory (t, start_addr, search_space_len,
2998 pattern, pattern_len, found_addrp);
2999 }
3000 else
3001 {
3002 /* If a special version of to_search_memory isn't available, use the
3003 simple version. */
3004 found = simple_search_memory (current_target.beneath,
3005 start_addr, search_space_len,
3006 pattern, pattern_len, found_addrp);
3007 }
3008
3009 if (targetdebug)
3010 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3011
3012 return found;
3013 }
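
/* Illustrative sketch (not part of the original source): searching a
   region of target memory for a fixed byte pattern and acting on the
   three possible return values.  The function name and the pattern
   are hypothetical; disabled with "#if 0" so it has no effect on the
   build.  */
#if 0
static void
example_search_for_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte magic[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found_addr;
  int found = target_search_memory (start, space_len,
				    magic, sizeof (magic), &found_addr);

  if (found == 1)
    fprintf_unfiltered (gdb_stdlog, "pattern found at %s\n",
			hex_string (found_addr));
  else if (found == 0)
    fprintf_unfiltered (gdb_stdlog, "pattern not found\n");
  else
    fprintf_unfiltered (gdb_stdlog, "search stopped by a read error\n");
}
#endif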
3014
3015 /* Look through the currently pushed targets. If none of them will
3016 be able to restart the currently running process, issue an error
3017 message. */
3018
3019 void
3020 target_require_runnable (void)
3021 {
3022 struct target_ops *t;
3023
3024 for (t = target_stack; t != NULL; t = t->beneath)
3025 {
3026 /* If this target knows how to create a new program, then
3027 assume we will still be able to after killing the current
3028 one. Either killing and mourning will not pop T, or else
3029 find_default_run_target will find it again. */
3030 if (t->to_create_inferior != NULL)
3031 return;
3032
3033 /* Do not worry about thread_stratum targets that cannot
3034 create inferiors. Assume they will be pushed again if
3035 necessary, and continue to the process_stratum. */
3036 if (t->to_stratum == thread_stratum
3037 || t->to_stratum == arch_stratum)
3038 continue;
3039
3040 error (_("The \"%s\" target does not support \"run\". "
3041 "Try \"help target\" or \"continue\"."),
3042 t->to_shortname);
3043 }
3044
3045 /* This function is only called if the target is running. In that
3046 case there should have been a process_stratum target and it
3047 should either know how to create inferiors, or not... */
3048 internal_error (__FILE__, __LINE__, _("No targets found"));
3049 }
3050
3051 /* Look through the list of possible targets for a target that can
3052 execute a run or attach command without any other data. This is
3053 used to locate the default process stratum.
3054
3055 If DO_MESG is not NULL, the result is always valid (error() is
3056 called for errors); else, return NULL on error. */
3057
3058 static struct target_ops *
3059 find_default_run_target (char *do_mesg)
3060 {
3061 struct target_ops **t;
3062 struct target_ops *runable = NULL;
3063 int count;
3064
3065 count = 0;
3066
3067 for (t = target_structs; t < target_structs + target_struct_size;
3068 ++t)
3069 {
3070 if ((*t)->to_can_run && target_can_run (*t))
3071 {
3072 runable = *t;
3073 ++count;
3074 }
3075 }
3076
3077 if (count != 1)
3078 {
3079 if (do_mesg)
3080 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3081 else
3082 return NULL;
3083 }
3084
3085 return runable;
3086 }
3087
3088 void
3089 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3090 {
3091 struct target_ops *t;
3092
3093 t = find_default_run_target ("attach");
3094 (t->to_attach) (t, args, from_tty);
3095 return;
3096 }
3097
3098 void
3099 find_default_create_inferior (struct target_ops *ops,
3100 char *exec_file, char *allargs, char **env,
3101 int from_tty)
3102 {
3103 struct target_ops *t;
3104
3105 t = find_default_run_target ("run");
3106 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3107 return;
3108 }
3109
3110 static int
3111 find_default_can_async_p (void)
3112 {
3113 struct target_ops *t;
3114
3115 /* This may be called before the target is pushed on the stack;
3116 look for the default process stratum. If there's none, gdb isn't
3117 configured with a native debugger, and target remote isn't
3118 connected yet. */
3119 t = find_default_run_target (NULL);
3120 if (t && t->to_can_async_p)
3121 return (t->to_can_async_p) ();
3122 return 0;
3123 }
3124
3125 static int
3126 find_default_is_async_p (void)
3127 {
3128 struct target_ops *t;
3129
3130 /* This may be called before the target is pushed on the stack;
3131 look for the default process stratum. If there's none, gdb isn't
3132 configured with a native debugger, and target remote isn't
3133 connected yet. */
3134 t = find_default_run_target (NULL);
3135 if (t && t->to_is_async_p)
3136 return (t->to_is_async_p) ();
3137 return 0;
3138 }
3139
3140 static int
3141 find_default_supports_non_stop (void)
3142 {
3143 struct target_ops *t;
3144
3145 t = find_default_run_target (NULL);
3146 if (t && t->to_supports_non_stop)
3147 return (t->to_supports_non_stop) ();
3148 return 0;
3149 }
3150
3151 int
3152 target_supports_non_stop (void)
3153 {
3154 struct target_ops *t;
3155
3156 for (t = &current_target; t != NULL; t = t->beneath)
3157 if (t->to_supports_non_stop)
3158 return t->to_supports_non_stop ();
3159
3160 return 0;
3161 }
3162
3163 /* Implement the "info proc" command. */
3164
3165 int
3166 target_info_proc (char *args, enum info_proc_what what)
3167 {
3168 struct target_ops *t;
3169
3170 /* If we're already connected to something that can get us OS
3171 related data, use it. Otherwise, try using the native
3172 target. */
3173 if (current_target.to_stratum >= process_stratum)
3174 t = current_target.beneath;
3175 else
3176 t = find_default_run_target (NULL);
3177
3178 for (; t != NULL; t = t->beneath)
3179 {
3180 if (t->to_info_proc != NULL)
3181 {
3182 t->to_info_proc (t, args, what);
3183
3184 if (targetdebug)
3185 fprintf_unfiltered (gdb_stdlog,
3186 "target_info_proc (\"%s\", %d)\n", args, what);
3187
3188 return 1;
3189 }
3190 }
3191
3192 return 0;
3193 }
3194
3195 static int
3196 find_default_supports_disable_randomization (void)
3197 {
3198 struct target_ops *t;
3199
3200 t = find_default_run_target (NULL);
3201 if (t && t->to_supports_disable_randomization)
3202 return (t->to_supports_disable_randomization) ();
3203 return 0;
3204 }
3205
3206 int
3207 target_supports_disable_randomization (void)
3208 {
3209 struct target_ops *t;
3210
3211 for (t = &current_target; t != NULL; t = t->beneath)
3212 if (t->to_supports_disable_randomization)
3213 return t->to_supports_disable_randomization ();
3214
3215 return 0;
3216 }
3217
3218 char *
3219 target_get_osdata (const char *type)
3220 {
3221 struct target_ops *t;
3222
3223 /* If we're already connected to something that can get us OS
3224 related data, use it. Otherwise, try using the native
3225 target. */
3226 if (current_target.to_stratum >= process_stratum)
3227 t = current_target.beneath;
3228 else
3229 t = find_default_run_target ("get OS data");
3230
3231 if (!t)
3232 return NULL;
3233
3234 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3235 }
3236
3237 /* Determine the current address space of thread PTID. */
3238
3239 struct address_space *
3240 target_thread_address_space (ptid_t ptid)
3241 {
3242 struct address_space *aspace;
3243 struct inferior *inf;
3244 struct target_ops *t;
3245
3246 for (t = current_target.beneath; t != NULL; t = t->beneath)
3247 {
3248 if (t->to_thread_address_space != NULL)
3249 {
3250 aspace = t->to_thread_address_space (t, ptid);
3251 gdb_assert (aspace);
3252
3253 if (targetdebug)
3254 fprintf_unfiltered (gdb_stdlog,
3255 "target_thread_address_space (%s) = %d\n",
3256 target_pid_to_str (ptid),
3257 address_space_num (aspace));
3258 return aspace;
3259 }
3260 }
3261
3262 /* Fall-back to the "main" address space of the inferior. */
3263 inf = find_inferior_pid (ptid_get_pid (ptid));
3264
3265 if (inf == NULL || inf->aspace == NULL)
3266 internal_error (__FILE__, __LINE__,
3267 _("Can't determine the current "
3268 "address space of thread %s\n"),
3269 target_pid_to_str (ptid));
3270
3271 return inf->aspace;
3272 }
3273
3274
3275 /* Target file operations. */
3276
3277 static struct target_ops *
3278 default_fileio_target (void)
3279 {
3280 /* If we're already connected to something that can perform
3281 file I/O, use it. Otherwise, try using the native target. */
3282 if (current_target.to_stratum >= process_stratum)
3283 return current_target.beneath;
3284 else
3285 return find_default_run_target ("file I/O");
3286 }
3287
3288 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3289 target file descriptor, or -1 if an error occurs (and set
3290 *TARGET_ERRNO). */
3291 int
3292 target_fileio_open (const char *filename, int flags, int mode,
3293 int *target_errno)
3294 {
3295 struct target_ops *t;
3296
3297 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3298 {
3299 if (t->to_fileio_open != NULL)
3300 {
3301 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3302
3303 if (targetdebug)
3304 fprintf_unfiltered (gdb_stdlog,
3305 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3306 filename, flags, mode,
3307 fd, fd != -1 ? 0 : *target_errno);
3308 return fd;
3309 }
3310 }
3311
3312 *target_errno = FILEIO_ENOSYS;
3313 return -1;
3314 }
3315
3316 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3317 Return the number of bytes written, or -1 if an error occurs
3318 (and set *TARGET_ERRNO). */
3319 int
3320 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3321 ULONGEST offset, int *target_errno)
3322 {
3323 struct target_ops *t;
3324
3325 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3326 {
3327 if (t->to_fileio_pwrite != NULL)
3328 {
3329 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3330 target_errno);
3331
3332 if (targetdebug)
3333 fprintf_unfiltered (gdb_stdlog,
3334 "target_fileio_pwrite (%d,...,%d,%s) "
3335 "= %d (%d)\n",
3336 fd, len, pulongest (offset),
3337 ret, ret != -1 ? 0 : *target_errno);
3338 return ret;
3339 }
3340 }
3341
3342 *target_errno = FILEIO_ENOSYS;
3343 return -1;
3344 }
3345
3346 /* Read up to LEN bytes from FD on the target into READ_BUF.
3347 Return the number of bytes read, or -1 if an error occurs
3348 (and set *TARGET_ERRNO). */
3349 int
3350 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3351 ULONGEST offset, int *target_errno)
3352 {
3353 struct target_ops *t;
3354
3355 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3356 {
3357 if (t->to_fileio_pread != NULL)
3358 {
3359 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3360 target_errno);
3361
3362 if (targetdebug)
3363 fprintf_unfiltered (gdb_stdlog,
3364 "target_fileio_pread (%d,...,%d,%s) "
3365 "= %d (%d)\n",
3366 fd, len, pulongest (offset),
3367 ret, ret != -1 ? 0 : *target_errno);
3368 return ret;
3369 }
3370 }
3371
3372 *target_errno = FILEIO_ENOSYS;
3373 return -1;
3374 }
3375
3376 /* Close FD on the target. Return 0, or -1 if an error occurs
3377 (and set *TARGET_ERRNO). */
3378 int
3379 target_fileio_close (int fd, int *target_errno)
3380 {
3381 struct target_ops *t;
3382
3383 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3384 {
3385 if (t->to_fileio_close != NULL)
3386 {
3387 int ret = t->to_fileio_close (fd, target_errno);
3388
3389 if (targetdebug)
3390 fprintf_unfiltered (gdb_stdlog,
3391 "target_fileio_close (%d) = %d (%d)\n",
3392 fd, ret, ret != -1 ? 0 : *target_errno);
3393 return ret;
3394 }
3395 }
3396
3397 *target_errno = FILEIO_ENOSYS;
3398 return -1;
3399 }
3400
3401 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3402 occurs (and set *TARGET_ERRNO). */
3403 int
3404 target_fileio_unlink (const char *filename, int *target_errno)
3405 {
3406 struct target_ops *t;
3407
3408 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3409 {
3410 if (t->to_fileio_unlink != NULL)
3411 {
3412 int ret = t->to_fileio_unlink (filename, target_errno);
3413
3414 if (targetdebug)
3415 fprintf_unfiltered (gdb_stdlog,
3416 "target_fileio_unlink (%s) = %d (%d)\n",
3417 filename, ret, ret != -1 ? 0 : *target_errno);
3418 return ret;
3419 }
3420 }
3421
3422 *target_errno = FILEIO_ENOSYS;
3423 return -1;
3424 }
3425
3426 /* Read value of symbolic link FILENAME on the target. Return a
3427 null-terminated string allocated via xmalloc, or NULL if an error
3428 occurs (and set *TARGET_ERRNO). */
3429 char *
3430 target_fileio_readlink (const char *filename, int *target_errno)
3431 {
3432 struct target_ops *t;
3433
3434 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3435 {
3436 if (t->to_fileio_readlink != NULL)
3437 {
3438 char *ret = t->to_fileio_readlink (filename, target_errno);
3439
3440 if (targetdebug)
3441 fprintf_unfiltered (gdb_stdlog,
3442 "target_fileio_readlink (%s) = %s (%d)\n",
3443 filename, ret ? ret : "(nil)",
3444 ret ? 0 : *target_errno);
3445 return ret;
3446 }
3447 }
3448
3449 *target_errno = FILEIO_ENOSYS;
3450 return NULL;
3451 }
3452
3453 static void
3454 target_fileio_close_cleanup (void *opaque)
3455 {
3456 int fd = *(int *) opaque;
3457 int target_errno;
3458
3459 target_fileio_close (fd, &target_errno);
3460 }
3461
3462 /* Read target file FILENAME. Store the result in *BUF_P and
3463 return the size of the transferred data. PADDING additional bytes are
3464 available in *BUF_P. This is a helper function for
3465 target_fileio_read_alloc; see the declaration of that function for more
3466 information. */
3467
3468 static LONGEST
3469 target_fileio_read_alloc_1 (const char *filename,
3470 gdb_byte **buf_p, int padding)
3471 {
3472 struct cleanup *close_cleanup;
3473 size_t buf_alloc, buf_pos;
3474 gdb_byte *buf;
3475 LONGEST n;
3476 int fd;
3477 int target_errno;
3478
3479 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3480 if (fd == -1)
3481 return -1;
3482
3483 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3484
3485 /* Start by reading up to 4K at a time. The target will throttle
3486 this number down if necessary. */
3487 buf_alloc = 4096;
3488 buf = xmalloc (buf_alloc);
3489 buf_pos = 0;
3490 while (1)
3491 {
3492 n = target_fileio_pread (fd, &buf[buf_pos],
3493 buf_alloc - buf_pos - padding, buf_pos,
3494 &target_errno);
3495 if (n < 0)
3496 {
3497 /* An error occurred. */
3498 do_cleanups (close_cleanup);
3499 xfree (buf);
3500 return -1;
3501 }
3502 else if (n == 0)
3503 {
3504 /* Read all there was. */
3505 do_cleanups (close_cleanup);
3506 if (buf_pos == 0)
3507 xfree (buf);
3508 else
3509 *buf_p = buf;
3510 return buf_pos;
3511 }
3512
3513 buf_pos += n;
3514
3515 /* If the buffer is filling up, expand it. */
3516 if (buf_alloc < buf_pos * 2)
3517 {
3518 buf_alloc *= 2;
3519 buf = xrealloc (buf, buf_alloc);
3520 }
3521
3522 QUIT;
3523 }
3524 }
3525
3526 /* Read target file FILENAME. Store the result in *BUF_P and return
3527 the size of the transferred data. See the declaration of this
3528 function in "target.h" for more information about the return value. */
3529
3530 LONGEST
3531 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3532 {
3533 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3534 }
3535
3536 /* Read target file FILENAME. The result is NUL-terminated and
3537 returned as a string, allocated using xmalloc. If an error occurs
3538 or the transfer is unsupported, NULL is returned. Empty objects
3539 are returned as allocated but empty strings. A warning is issued
3540 if the result contains any embedded NUL bytes. */
3541
3542 char *
3543 target_fileio_read_stralloc (const char *filename)
3544 {
3545 char *buffer;
3546 LONGEST i, transferred;
3547
3548 transferred = target_fileio_read_alloc_1 (filename,
3549 (gdb_byte **) &buffer, 1);
3550
3551 if (transferred < 0)
3552 return NULL;
3553
3554 if (transferred == 0)
3555 return xstrdup ("");
3556
3557 buffer[transferred] = 0;
3558
3559 /* Check for embedded NUL bytes; but allow trailing NULs. */
3560 for (i = strlen (buffer); i < transferred; i++)
3561 if (buffer[i] != 0)
3562 {
3563 warning (_("target file %s "
3564 "contained unexpected null characters"),
3565 filename);
3566 break;
3567 }
3568
3569 return buffer;
3570 }
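
/* Illustrative sketch (not part of the original source): reading a
   file on the target as a string via the fileio layer.  The
   "/proc/version" path is only an example and depends on the remote
   system; the function name is hypothetical, and the block is
   disabled with "#if 0".  */
#if 0
static void
example_show_target_file (void)
{
  char *text = target_fileio_read_stralloc ("/proc/version");

  if (text != NULL)
    {
      fprintf_unfiltered (gdb_stdlog, "%s", text);
      xfree (text);
    }
}
#endif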
3571
3572
3573 static int
3574 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3575 {
3576 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3577 }
3578
3579 static int
3580 default_watchpoint_addr_within_range (struct target_ops *target,
3581 CORE_ADDR addr,
3582 CORE_ADDR start, int length)
3583 {
3584 return addr >= start && addr < start + length;
3585 }
3586
3587 static struct gdbarch *
3588 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3589 {
3590 return target_gdbarch ();
3591 }
3592
3593 static int
3594 return_zero (void)
3595 {
3596 return 0;
3597 }
3598
3599 static int
3600 return_one (void)
3601 {
3602 return 1;
3603 }
3604
3605 static int
3606 return_minus_one (void)
3607 {
3608 return -1;
3609 }
3610
3611 /* Find a single runnable target in the stack and return it. If for
3612 some reason there is more than one, return NULL. */
3613
3614 struct target_ops *
3615 find_run_target (void)
3616 {
3617 struct target_ops **t;
3618 struct target_ops *runable = NULL;
3619 int count;
3620
3621 count = 0;
3622
3623 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3624 {
3625 if ((*t)->to_can_run && target_can_run (*t))
3626 {
3627 runable = *t;
3628 ++count;
3629 }
3630 }
3631
3632 return (count == 1 ? runable : NULL);
3633 }
3634
3635 /* Find the next target down the stack from the specified target;
3636 that is, the target immediately beneath the given one on the
3637 stack.  */
3638
3639 struct target_ops *
3640 find_target_beneath (struct target_ops *t)
3641 {
3642 return t->beneath;
3643 }
3644
3645 \f
3646 /* The inferior process has died. Long live the inferior! */
3647
3648 void
3649 generic_mourn_inferior (void)
3650 {
3651 ptid_t ptid;
3652
3653 ptid = inferior_ptid;
3654 inferior_ptid = null_ptid;
3655
3656 /* Mark breakpoints uninserted in case something tries to delete a
3657 breakpoint while we delete the inferior's threads (which would
3658 fail, since the inferior is long gone). */
3659 mark_breakpoints_out ();
3660
3661 if (!ptid_equal (ptid, null_ptid))
3662 {
3663 int pid = ptid_get_pid (ptid);
3664 exit_inferior (pid);
3665 }
3666
3667 /* Note this wipes step-resume breakpoints, so needs to be done
3668 after exit_inferior, which ends up referencing the step-resume
3669 breakpoints through clear_thread_inferior_resources. */
3670 breakpoint_init_inferior (inf_exited);
3671
3672 registers_changed ();
3673
3674 reopen_exec_file ();
3675 reinit_frame_cache ();
3676
3677 if (deprecated_detach_hook)
3678 deprecated_detach_hook ();
3679 }
3680 \f
3681 /* Convert a normal process ID to a string. Returns the string in a
3682 static buffer. */
3683
3684 char *
3685 normal_pid_to_str (ptid_t ptid)
3686 {
3687 static char buf[32];
3688
3689 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3690 return buf;
3691 }
3692
3693 static char *
3694 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3695 {
3696 return normal_pid_to_str (ptid);
3697 }
3698
3699 /* Error-catcher for target_find_memory_regions. */
3700 static int
3701 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3702 {
3703 error (_("Command not implemented for this target."));
3704 return 0;
3705 }
3706
3707 /* Error-catcher for target_make_corefile_notes. */
3708 static char *
3709 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3710 {
3711 error (_("Command not implemented for this target."));
3712 return NULL;
3713 }
3714
3715 /* Error-catcher for target_get_bookmark. */
3716 static gdb_byte *
3717 dummy_get_bookmark (char *ignore1, int ignore2)
3718 {
3719 tcomplain ();
3720 return NULL;
3721 }
3722
3723 /* Error-catcher for target_goto_bookmark. */
3724 static void
3725 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3726 {
3727 tcomplain ();
3728 }
3729
3730 /* Set up the handful of non-empty slots needed by the dummy target
3731 vector. */
3732
3733 static void
3734 init_dummy_target (void)
3735 {
3736 dummy_target.to_shortname = "None";
3737 dummy_target.to_longname = "None";
3738 dummy_target.to_doc = "";
3739 dummy_target.to_attach = find_default_attach;
3740 dummy_target.to_detach =
3741 (void (*)(struct target_ops *, char *, int))target_ignore;
3742 dummy_target.to_create_inferior = find_default_create_inferior;
3743 dummy_target.to_can_async_p = find_default_can_async_p;
3744 dummy_target.to_is_async_p = find_default_is_async_p;
3745 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3746 dummy_target.to_supports_disable_randomization
3747 = find_default_supports_disable_randomization;
3748 dummy_target.to_pid_to_str = dummy_pid_to_str;
3749 dummy_target.to_stratum = dummy_stratum;
3750 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3751 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3752 dummy_target.to_get_bookmark = dummy_get_bookmark;
3753 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3754 dummy_target.to_xfer_partial = default_xfer_partial;
3755 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3756 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3757 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3758 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3759 dummy_target.to_has_execution
3760 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3761 dummy_target.to_stopped_by_watchpoint = return_zero;
3762 dummy_target.to_stopped_data_address =
3763 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3764 dummy_target.to_magic = OPS_MAGIC;
3765 }
3766 \f
3767 static void
3768 debug_to_open (char *args, int from_tty)
3769 {
3770 debug_target.to_open (args, from_tty);
3771
3772 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3773 }
3774
3775 void
3776 target_close (struct target_ops *targ, int quitting)
3777 {
3778 if (targ->to_xclose != NULL)
3779 targ->to_xclose (targ, quitting);
3780 else if (targ->to_close != NULL)
3781 targ->to_close (quitting);
3782
3783 if (targetdebug)
3784 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3785 }
3786
3787 void
3788 target_attach (char *args, int from_tty)
3789 {
3790 struct target_ops *t;
3791
3792 for (t = current_target.beneath; t != NULL; t = t->beneath)
3793 {
3794 if (t->to_attach != NULL)
3795 {
3796 t->to_attach (t, args, from_tty);
3797 if (targetdebug)
3798 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3799 args, from_tty);
3800 return;
3801 }
3802 }
3803
3804 internal_error (__FILE__, __LINE__,
3805 _("could not find a target to attach"));
3806 }
3807
3808 int
3809 target_thread_alive (ptid_t ptid)
3810 {
3811 struct target_ops *t;
3812
3813 for (t = current_target.beneath; t != NULL; t = t->beneath)
3814 {
3815 if (t->to_thread_alive != NULL)
3816 {
3817 int retval;
3818
3819 retval = t->to_thread_alive (t, ptid);
3820 if (targetdebug)
3821 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3822 PIDGET (ptid), retval);
3823
3824 return retval;
3825 }
3826 }
3827
3828 return 0;
3829 }
3830
3831 void
3832 target_find_new_threads (void)
3833 {
3834 struct target_ops *t;
3835
3836 for (t = current_target.beneath; t != NULL; t = t->beneath)
3837 {
3838 if (t->to_find_new_threads != NULL)
3839 {
3840 t->to_find_new_threads (t);
3841 if (targetdebug)
3842 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3843
3844 return;
3845 }
3846 }
3847 }
3848
3849 void
3850 target_stop (ptid_t ptid)
3851 {
3852 if (!may_stop)
3853 {
3854 warning (_("May not interrupt or stop the target, ignoring attempt"));
3855 return;
3856 }
3857
3858 (*current_target.to_stop) (ptid);
3859 }
3860
3861 static void
3862 debug_to_post_attach (int pid)
3863 {
3864 debug_target.to_post_attach (pid);
3865
3866 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3867 }
3868
3869 /* Return a pretty printed form of target_waitstatus.
3870 Space for the result is malloc'd, caller must free. */
3871
3872 char *
3873 target_waitstatus_to_string (const struct target_waitstatus *ws)
3874 {
3875 const char *kind_str = "status->kind = ";
3876
3877 switch (ws->kind)
3878 {
3879 case TARGET_WAITKIND_EXITED:
3880 return xstrprintf ("%sexited, status = %d",
3881 kind_str, ws->value.integer);
3882 case TARGET_WAITKIND_STOPPED:
3883 return xstrprintf ("%sstopped, signal = %s",
3884 kind_str, gdb_signal_to_name (ws->value.sig));
3885 case TARGET_WAITKIND_SIGNALLED:
3886 return xstrprintf ("%ssignalled, signal = %s",
3887 kind_str, gdb_signal_to_name (ws->value.sig));
3888 case TARGET_WAITKIND_LOADED:
3889 return xstrprintf ("%sloaded", kind_str);
3890 case TARGET_WAITKIND_FORKED:
3891 return xstrprintf ("%sforked", kind_str);
3892 case TARGET_WAITKIND_VFORKED:
3893 return xstrprintf ("%svforked", kind_str);
3894 case TARGET_WAITKIND_EXECD:
3895 return xstrprintf ("%sexecd", kind_str);
3896 case TARGET_WAITKIND_VFORK_DONE:
3897 return xstrprintf ("%svfork-done", kind_str);
3898 case TARGET_WAITKIND_SYSCALL_ENTRY:
3899 return xstrprintf ("%sentered syscall", kind_str);
3900 case TARGET_WAITKIND_SYSCALL_RETURN:
3901 return xstrprintf ("%sexited syscall", kind_str);
3902 case TARGET_WAITKIND_SPURIOUS:
3903 return xstrprintf ("%sspurious", kind_str);
3904 case TARGET_WAITKIND_IGNORE:
3905 return xstrprintf ("%signore", kind_str);
3906 case TARGET_WAITKIND_NO_HISTORY:
3907 return xstrprintf ("%sno-history", kind_str);
3908 case TARGET_WAITKIND_NO_RESUMED:
3909 return xstrprintf ("%sno-resumed", kind_str);
3910 default:
3911 return xstrprintf ("%sunknown???", kind_str);
3912 }
3913 }
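
/* Illustrative sketch (not part of the original source): logging a
   wait status with target_waitstatus_to_string, whose result is
   malloc'd and must be freed by the caller.  The function name is
   hypothetical; disabled with "#if 0" so it has no effect on the
   build.  */
#if 0
static void
example_log_waitstatus (const struct target_waitstatus *ws)
{
  char *str = target_waitstatus_to_string (ws);

  fprintf_unfiltered (gdb_stdlog, "%s\n", str);
  xfree (str);
}
#endif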
3914
3915 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3916 result. The incoming LIST argument is released. */
3917
3918 static char *
3919 str_comma_list_concat_elem (char *list, const char *elem)
3920 {
3921 if (list == NULL)
3922 return xstrdup (elem);
3923 else
3924 return reconcat (list, list, ", ", elem, (char *) NULL);
3925 }
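/* Illustrative use (editor's addition, not original code):

     char *list = NULL;

     list = str_comma_list_concat_elem (list, "TARGET_WNOHANG");
     list = str_comma_list_concat_elem (list, "unknown???");

   LIST now holds "TARGET_WNOHANG, unknown???".  Each call releases
   the previous LIST, so only the final string needs to be freed.  */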
3926
3927 /* Helper for target_options_to_string. If OPT is present in
3928 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3929 Returns the new resulting string. OPT is removed from
3930 TARGET_OPTIONS. */
3931
3932 static char *
3933 do_option (int *target_options, char *ret,
3934 int opt, char *opt_str)
3935 {
3936 if ((*target_options & opt) != 0)
3937 {
3938 ret = str_comma_list_concat_elem (ret, opt_str);
3939 *target_options &= ~opt;
3940 }
3941
3942 return ret;
3943 }
3944
3945 char *
3946 target_options_to_string (int target_options)
3947 {
3948 char *ret = NULL;
3949
3950 #define DO_TARG_OPTION(OPT) \
3951 ret = do_option (&target_options, ret, OPT, #OPT)
3952
3953 DO_TARG_OPTION (TARGET_WNOHANG);
3954
3955 if (target_options != 0)
3956 ret = str_comma_list_concat_elem (ret, "unknown???");
3957
3958 if (ret == NULL)
3959 ret = xstrdup ("");
3960 return ret;
3961 }
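/* Worked example (editor's addition): DO_TARG_OPTION stringizes the
   option name, so target_options_to_string (TARGET_WNOHANG) returns
   "TARGET_WNOHANG", a value with only unrecognized bits set returns
   "unknown???", and zero returns "".  The result is always malloc'd
   and must be freed by the caller.  */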
3962
3963 static void
3964 debug_print_register (const char * func,
3965 struct regcache *regcache, int regno)
3966 {
3967 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3968
3969 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3970 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3971 && gdbarch_register_name (gdbarch, regno) != NULL
3972 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3973 fprintf_unfiltered (gdb_stdlog, "(%s)",
3974 gdbarch_register_name (gdbarch, regno));
3975 else
3976 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3977 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3978 {
3979 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3980 int i, size = register_size (gdbarch, regno);
3981 gdb_byte buf[MAX_REGISTER_SIZE];
3982
3983 regcache_raw_collect (regcache, regno, buf);
3984 fprintf_unfiltered (gdb_stdlog, " = ");
3985 for (i = 0; i < size; i++)
3986 {
3987 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3988 }
3989 if (size <= sizeof (LONGEST))
3990 {
3991 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3992
3993 fprintf_unfiltered (gdb_stdlog, " %s %s",
3994 core_addr_to_string_nz (val), plongest (val));
3995 }
3996 }
3997 fprintf_unfiltered (gdb_stdlog, "\n");
3998 }
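/* Example log line (editor's sketch; register name, width and byte
   order depend on the architecture).  Fetching a 4-byte
   little-endian "pc" register holding 0x12e0 would produce:

     target_fetch_registers (pc) = e0120000 0x12e0 4832

   i.e. the raw bytes in target order, then the value printed as an
   address and as a decimal integer.  */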
3999
4000 void
4001 target_fetch_registers (struct regcache *regcache, int regno)
4002 {
4003 struct target_ops *t;
4004
4005 for (t = current_target.beneath; t != NULL; t = t->beneath)
4006 {
4007 if (t->to_fetch_registers != NULL)
4008 {
4009 t->to_fetch_registers (t, regcache, regno);
4010 if (targetdebug)
4011 debug_print_register ("target_fetch_registers", regcache, regno);
4012 return;
4013 }
4014 }
4015 }
4016
4017 void
4018 target_store_registers (struct regcache *regcache, int regno)
4019 {
4020 struct target_ops *t;
4021
4022 if (!may_write_registers)
4023 error (_("Writing to registers is not allowed (regno %d)"), regno);
4024
4025 for (t = current_target.beneath; t != NULL; t = t->beneath)
4026 {
4027 if (t->to_store_registers != NULL)
4028 {
4029 t->to_store_registers (t, regcache, regno);
4030 if (targetdebug)
4031 {
4032 debug_print_register ("target_store_registers", regcache, regno);
4033 }
4034 return;
4035 }
4036 }
4037
4038 noprocess ();
4039 }
4040
4041 int
4042 target_core_of_thread (ptid_t ptid)
4043 {
4044 struct target_ops *t;
4045
4046 for (t = current_target.beneath; t != NULL; t = t->beneath)
4047 {
4048 if (t->to_core_of_thread != NULL)
4049 {
4050 int retval = t->to_core_of_thread (t, ptid);
4051
4052 if (targetdebug)
4053 fprintf_unfiltered (gdb_stdlog,
4054 "target_core_of_thread (%d) = %d\n",
4055 PIDGET (ptid), retval);
4056 return retval;
4057 }
4058 }
4059
4060 return -1;
4061 }
4062
4063 int
4064 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4065 {
4066 struct target_ops *t;
4067
4068 for (t = current_target.beneath; t != NULL; t = t->beneath)
4069 {
4070 if (t->to_verify_memory != NULL)
4071 {
4072 int retval = t->to_verify_memory (t, data, memaddr, size);
4073
4074 if (targetdebug)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "target_verify_memory (%s, %s) = %d\n",
4077 paddress (target_gdbarch (), memaddr),
4078 pulongest (size),
4079 retval);
4080 return retval;
4081 }
4082 }
4083
4084 tcomplain ();
4085 }
4086
4087 /* The documentation for this function is in its prototype declaration in
4088 target.h. */
4089
4090 int
4091 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4092 {
4093 struct target_ops *t;
4094
4095 for (t = current_target.beneath; t != NULL; t = t->beneath)
4096 if (t->to_insert_mask_watchpoint != NULL)
4097 {
4098 int ret;
4099
4100 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4101
4102 if (targetdebug)
4103 fprintf_unfiltered (gdb_stdlog, "\
4104 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4105 core_addr_to_string (addr),
4106 core_addr_to_string (mask), rw, ret);
4107
4108 return ret;
4109 }
4110
4111 return 1;
4112 }
4113
4114 /* The documentation for this function is in its prototype declaration in
4115 target.h. */
4116
4117 int
4118 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4119 {
4120 struct target_ops *t;
4121
4122 for (t = current_target.beneath; t != NULL; t = t->beneath)
4123 if (t->to_remove_mask_watchpoint != NULL)
4124 {
4125 int ret;
4126
4127 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4128
4129 if (targetdebug)
4130 fprintf_unfiltered (gdb_stdlog, "\
4131 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4132 core_addr_to_string (addr),
4133 core_addr_to_string (mask), rw, ret);
4134
4135 return ret;
4136 }
4137
4138 return 1;
4139 }
4140
4141 /* The documentation for this function is in its prototype declaration
4142 in target.h. */
4143
4144 int
4145 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4146 {
4147 struct target_ops *t;
4148
4149 for (t = current_target.beneath; t != NULL; t = t->beneath)
4150 if (t->to_masked_watch_num_registers != NULL)
4151 return t->to_masked_watch_num_registers (t, addr, mask);
4152
4153 return -1;
4154 }
4155
4156 /* The documentation for this function is in its prototype declaration
4157 in target.h. */
4158
4159 int
4160 target_ranged_break_num_registers (void)
4161 {
4162 struct target_ops *t;
4163
4164 for (t = current_target.beneath; t != NULL; t = t->beneath)
4165 if (t->to_ranged_break_num_registers != NULL)
4166 return t->to_ranged_break_num_registers (t);
4167
4168 return -1;
4169 }
4170
4171 /* See target.h. */
4172
4173 int
4174 target_supports_btrace (void)
4175 {
4176 struct target_ops *t;
4177
4178 for (t = current_target.beneath; t != NULL; t = t->beneath)
4179 if (t->to_supports_btrace != NULL)
4180 return t->to_supports_btrace ();
4181
4182 return 0;
4183 }
4184
4185 /* See target.h. */
4186
4187 struct btrace_target_info *
4188 target_enable_btrace (ptid_t ptid)
4189 {
4190 struct target_ops *t;
4191
4192 for (t = current_target.beneath; t != NULL; t = t->beneath)
4193 if (t->to_enable_btrace != NULL)
4194 return t->to_enable_btrace (ptid);
4195
4196 tcomplain ();
4197 return NULL;
4198 }
4199
4200 /* See target.h. */
4201
4202 void
4203 target_disable_btrace (struct btrace_target_info *btinfo)
4204 {
4205 struct target_ops *t;
4206
4207 for (t = current_target.beneath; t != NULL; t = t->beneath)
4208 if (t->to_disable_btrace != NULL)
4209 return t->to_disable_btrace (btinfo);
4210
4211 tcomplain ();
4212 }
4213
4214 /* See target.h. */
4215
4216 void
4217 target_teardown_btrace (struct btrace_target_info *btinfo)
4218 {
4219 struct target_ops *t;
4220
4221 for (t = current_target.beneath; t != NULL; t = t->beneath)
4222 if (t->to_teardown_btrace != NULL)
4223 return t->to_teardown_btrace (btinfo);
4224
4225 tcomplain ();
4226 }
4227
4228 /* See target.h. */
4229
4230 VEC (btrace_block_s) *
4231 target_read_btrace (struct btrace_target_info *btinfo,
4232 enum btrace_read_type type)
4233 {
4234 struct target_ops *t;
4235
4236 for (t = current_target.beneath; t != NULL; t = t->beneath)
4237 if (t->to_read_btrace != NULL)
4238 return t->to_read_btrace (btinfo, type);
4239
4240 tcomplain ();
4241 return NULL;
4242 }
4243
4244 /* See target.h. */
4245
4246 void
4247 target_stop_recording (void)
4248 {
4249 struct target_ops *t;
4250
4251 for (t = current_target.beneath; t != NULL; t = t->beneath)
4252 if (t->to_stop_recording != NULL)
4253 {
4254 t->to_stop_recording ();
4255 return;
4256 }
4257
4258 /* This is optional. */
4259 }
4260
4261 /* See target.h. */
4262
4263 void
4264 target_info_record (void)
4265 {
4266 struct target_ops *t;
4267
4268 for (t = current_target.beneath; t != NULL; t = t->beneath)
4269 if (t->to_info_record != NULL)
4270 {
4271 t->to_info_record ();
4272 return;
4273 }
4274
4275 tcomplain ();
4276 }
4277
4278 /* See target.h. */
4279
4280 void
4281 target_save_record (char *filename)
4282 {
4283 struct target_ops *t;
4284
4285 for (t = current_target.beneath; t != NULL; t = t->beneath)
4286 if (t->to_save_record != NULL)
4287 {
4288 t->to_save_record (filename);
4289 return;
4290 }
4291
4292 tcomplain ();
4293 }
4294
4295 /* See target.h. */
4296
4297 int
4298 target_supports_delete_record (void)
4299 {
4300 struct target_ops *t;
4301
4302 for (t = current_target.beneath; t != NULL; t = t->beneath)
4303 if (t->to_delete_record != NULL)
4304 return 1;
4305
4306 return 0;
4307 }
4308
4309 /* See target.h. */
4310
4311 void
4312 target_delete_record (void)
4313 {
4314 struct target_ops *t;
4315
4316 for (t = current_target.beneath; t != NULL; t = t->beneath)
4317 if (t->to_delete_record != NULL)
4318 {
4319 t->to_delete_record ();
4320 return;
4321 }
4322
4323 tcomplain ();
4324 }
4325
4326 /* See target.h. */
4327
4328 int
4329 target_record_is_replaying (void)
4330 {
4331 struct target_ops *t;
4332
4333 for (t = current_target.beneath; t != NULL; t = t->beneath)
4334 if (t->to_record_is_replaying != NULL)
4335 return t->to_record_is_replaying ();
4336
4337 return 0;
4338 }
4339
4340 /* See target.h. */
4341
4342 void
4343 target_goto_record_begin (void)
4344 {
4345 struct target_ops *t;
4346
4347 for (t = current_target.beneath; t != NULL; t = t->beneath)
4348 if (t->to_goto_record_begin != NULL)
4349 {
4350 t->to_goto_record_begin ();
4351 return;
4352 }
4353
4354 tcomplain ();
4355 }
4356
4357 /* See target.h. */
4358
4359 void
4360 target_goto_record_end (void)
4361 {
4362 struct target_ops *t;
4363
4364 for (t = current_target.beneath; t != NULL; t = t->beneath)
4365 if (t->to_goto_record_end != NULL)
4366 {
4367 t->to_goto_record_end ();
4368 return;
4369 }
4370
4371 tcomplain ();
4372 }
4373
4374 /* See target.h. */
4375
4376 void
4377 target_goto_record (ULONGEST insn)
4378 {
4379 struct target_ops *t;
4380
4381 for (t = current_target.beneath; t != NULL; t = t->beneath)
4382 if (t->to_goto_record != NULL)
4383 {
4384 t->to_goto_record (insn);
4385 return;
4386 }
4387
4388 tcomplain ();
4389 }
4390
4391 static void
4392 debug_to_prepare_to_store (struct regcache *regcache)
4393 {
4394 debug_target.to_prepare_to_store (regcache);
4395
4396 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4397 }
4398
4399 static int
4400 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4401 int write, struct mem_attrib *attrib,
4402 struct target_ops *target)
4403 {
4404 int retval;
4405
4406 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4407 attrib, target);
4408
4409 fprintf_unfiltered (gdb_stdlog,
4410 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4411 paddress (target_gdbarch (), memaddr), len,
4412 write ? "write" : "read", retval);
4413
4414 if (retval > 0)
4415 {
4416 int i;
4417
4418 fputs_unfiltered (", bytes =", gdb_stdlog);
4419 for (i = 0; i < retval; i++)
4420 {
4421 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4422 {
4423 if (targetdebug < 2 && i > 0)
4424 {
4425 fprintf_unfiltered (gdb_stdlog, " ...");
4426 break;
4427 }
4428 fprintf_unfiltered (gdb_stdlog, "\n");
4429 }
4430
4431 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4432 }
4433 }
4434
4435 fputc_unfiltered ('\n', gdb_stdlog);
4436
4437 return retval;
4438 }
4439
4440 static void
4441 debug_to_files_info (struct target_ops *target)
4442 {
4443 debug_target.to_files_info (target);
4444
4445 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4446 }
4447
4448 static int
4449 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4450 struct bp_target_info *bp_tgt)
4451 {
4452 int retval;
4453
4454 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4455
4456 fprintf_unfiltered (gdb_stdlog,
4457 "target_insert_breakpoint (%s, xxx) = %ld\n",
4458 core_addr_to_string (bp_tgt->placed_address),
4459 (unsigned long) retval);
4460 return retval;
4461 }
4462
4463 static int
4464 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4465 struct bp_target_info *bp_tgt)
4466 {
4467 int retval;
4468
4469 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4470
4471 fprintf_unfiltered (gdb_stdlog,
4472 "target_remove_breakpoint (%s, xxx) = %ld\n",
4473 core_addr_to_string (bp_tgt->placed_address),
4474 (unsigned long) retval);
4475 return retval;
4476 }
4477
4478 static int
4479 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4480 {
4481 int retval;
4482
4483 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4484
4485 fprintf_unfiltered (gdb_stdlog,
4486 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4487 (unsigned long) type,
4488 (unsigned long) cnt,
4489 (unsigned long) from_tty,
4490 (unsigned long) retval);
4491 return retval;
4492 }
4493
4494 static int
4495 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4496 {
4497 CORE_ADDR retval;
4498
4499 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4500
4501 fprintf_unfiltered (gdb_stdlog,
4502 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4503 core_addr_to_string (addr), (unsigned long) len,
4504 core_addr_to_string (retval));
4505 return retval;
4506 }
4507
4508 static int
4509 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4510 struct expression *cond)
4511 {
4512 int retval;
4513
4514 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4515 rw, cond);
4516
4517 fprintf_unfiltered (gdb_stdlog,
4518 "target_can_accel_watchpoint_condition "
4519 "(%s, %d, %d, %s) = %ld\n",
4520 core_addr_to_string (addr), len, rw,
4521 host_address_to_string (cond), (unsigned long) retval);
4522 return retval;
4523 }
4524
4525 static int
4526 debug_to_stopped_by_watchpoint (void)
4527 {
4528 int retval;
4529
4530 retval = debug_target.to_stopped_by_watchpoint ();
4531
4532 fprintf_unfiltered (gdb_stdlog,
4533 "target_stopped_by_watchpoint () = %ld\n",
4534 (unsigned long) retval);
4535 return retval;
4536 }
4537
4538 static int
4539 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4540 {
4541 int retval;
4542
4543 retval = debug_target.to_stopped_data_address (target, addr);
4544
4545 fprintf_unfiltered (gdb_stdlog,
4546 "target_stopped_data_address ([%s]) = %ld\n",
4547 core_addr_to_string (*addr),
4548 (unsigned long)retval);
4549 return retval;
4550 }
4551
4552 static int
4553 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4554 CORE_ADDR addr,
4555 CORE_ADDR start, int length)
4556 {
4557 int retval;
4558
4559 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4560 start, length);
4561
4562 fprintf_unfiltered (gdb_stdlog,
4563 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4564 core_addr_to_string (addr), core_addr_to_string (start),
4565 length, retval);
4566 return retval;
4567 }
4568
4569 static int
4570 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4571 struct bp_target_info *bp_tgt)
4572 {
4573 int retval;
4574
4575 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4576
4577 fprintf_unfiltered (gdb_stdlog,
4578 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4579 core_addr_to_string (bp_tgt->placed_address),
4580 (unsigned long) retval);
4581 return retval;
4582 }
4583
4584 static int
4585 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4586 struct bp_target_info *bp_tgt)
4587 {
4588 int retval;
4589
4590 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4591
4592 fprintf_unfiltered (gdb_stdlog,
4593 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4594 core_addr_to_string (bp_tgt->placed_address),
4595 (unsigned long) retval);
4596 return retval;
4597 }
4598
4599 static int
4600 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4601 struct expression *cond)
4602 {
4603 int retval;
4604
4605 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4606
4607 fprintf_unfiltered (gdb_stdlog,
4608 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4609 core_addr_to_string (addr), len, type,
4610 host_address_to_string (cond), (unsigned long) retval);
4611 return retval;
4612 }
4613
4614 static int
4615 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4616 struct expression *cond)
4617 {
4618 int retval;
4619
4620 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4621
4622 fprintf_unfiltered (gdb_stdlog,
4623 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4624 core_addr_to_string (addr), len, type,
4625 host_address_to_string (cond), (unsigned long) retval);
4626 return retval;
4627 }
4628
4629 static void
4630 debug_to_terminal_init (void)
4631 {
4632 debug_target.to_terminal_init ();
4633
4634 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4635 }
4636
4637 static void
4638 debug_to_terminal_inferior (void)
4639 {
4640 debug_target.to_terminal_inferior ();
4641
4642 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4643 }
4644
4645 static void
4646 debug_to_terminal_ours_for_output (void)
4647 {
4648 debug_target.to_terminal_ours_for_output ();
4649
4650 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4651 }
4652
4653 static void
4654 debug_to_terminal_ours (void)
4655 {
4656 debug_target.to_terminal_ours ();
4657
4658 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4659 }
4660
4661 static void
4662 debug_to_terminal_save_ours (void)
4663 {
4664 debug_target.to_terminal_save_ours ();
4665
4666 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4667 }
4668
4669 static void
4670 debug_to_terminal_info (char *arg, int from_tty)
4671 {
4672 debug_target.to_terminal_info (arg, from_tty);
4673
4674 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4675 from_tty);
4676 }
4677
4678 static void
4679 debug_to_load (char *args, int from_tty)
4680 {
4681 debug_target.to_load (args, from_tty);
4682
4683 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4684 }
4685
4686 static void
4687 debug_to_post_startup_inferior (ptid_t ptid)
4688 {
4689 debug_target.to_post_startup_inferior (ptid);
4690
4691 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4692 PIDGET (ptid));
4693 }
4694
4695 static int
4696 debug_to_insert_fork_catchpoint (int pid)
4697 {
4698 int retval;
4699
4700 retval = debug_target.to_insert_fork_catchpoint (pid);
4701
4702 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4703 pid, retval);
4704
4705 return retval;
4706 }
4707
4708 static int
4709 debug_to_remove_fork_catchpoint (int pid)
4710 {
4711 int retval;
4712
4713 retval = debug_target.to_remove_fork_catchpoint (pid);
4714
4715 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4716 pid, retval);
4717
4718 return retval;
4719 }
4720
4721 static int
4722 debug_to_insert_vfork_catchpoint (int pid)
4723 {
4724 int retval;
4725
4726 retval = debug_target.to_insert_vfork_catchpoint (pid);
4727
4728 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4729 pid, retval);
4730
4731 return retval;
4732 }
4733
4734 static int
4735 debug_to_remove_vfork_catchpoint (int pid)
4736 {
4737 int retval;
4738
4739 retval = debug_target.to_remove_vfork_catchpoint (pid);
4740
4741 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4742 pid, retval);
4743
4744 return retval;
4745 }
4746
4747 static int
4748 debug_to_insert_exec_catchpoint (int pid)
4749 {
4750 int retval;
4751
4752 retval = debug_target.to_insert_exec_catchpoint (pid);
4753
4754 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4755 pid, retval);
4756
4757 return retval;
4758 }
4759
4760 static int
4761 debug_to_remove_exec_catchpoint (int pid)
4762 {
4763 int retval;
4764
4765 retval = debug_target.to_remove_exec_catchpoint (pid);
4766
4767 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4768 pid, retval);
4769
4770 return retval;
4771 }
4772
4773 static int
4774 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4775 {
4776 int has_exited;
4777
4778 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4779
4780 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4781 pid, wait_status, *exit_status, has_exited);
4782
4783 return has_exited;
4784 }
4785
4786 static int
4787 debug_to_can_run (void)
4788 {
4789 int retval;
4790
4791 retval = debug_target.to_can_run ();
4792
4793 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4794
4795 return retval;
4796 }
4797
4798 static struct gdbarch *
4799 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4800 {
4801 struct gdbarch *retval;
4802
4803 retval = debug_target.to_thread_architecture (ops, ptid);
4804
4805 fprintf_unfiltered (gdb_stdlog,
4806 "target_thread_architecture (%s) = %s [%s]\n",
4807 target_pid_to_str (ptid),
4808 host_address_to_string (retval),
4809 gdbarch_bfd_arch_info (retval)->printable_name);
4810 return retval;
4811 }
4812
4813 static void
4814 debug_to_stop (ptid_t ptid)
4815 {
4816 debug_target.to_stop (ptid);
4817
4818 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4819 target_pid_to_str (ptid));
4820 }
4821
4822 static void
4823 debug_to_rcmd (char *command,
4824 struct ui_file *outbuf)
4825 {
4826 debug_target.to_rcmd (command, outbuf);
4827 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4828 }
4829
4830 static char *
4831 debug_to_pid_to_exec_file (int pid)
4832 {
4833 char *exec_file;
4834
4835 exec_file = debug_target.to_pid_to_exec_file (pid);
4836
4837 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4838 pid, exec_file);
4839
4840 return exec_file;
4841 }
4842
4843 static void
4844 setup_target_debug (void)
4845 {
4846 memcpy (&debug_target, &current_target, sizeof debug_target);
4847
4848 current_target.to_open = debug_to_open;
4849 current_target.to_post_attach = debug_to_post_attach;
4850 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4851 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4852 current_target.to_files_info = debug_to_files_info;
4853 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4854 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4855 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4856 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4857 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4858 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4859 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4860 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4861 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4862 current_target.to_watchpoint_addr_within_range
4863 = debug_to_watchpoint_addr_within_range;
4864 current_target.to_region_ok_for_hw_watchpoint
4865 = debug_to_region_ok_for_hw_watchpoint;
4866 current_target.to_can_accel_watchpoint_condition
4867 = debug_to_can_accel_watchpoint_condition;
4868 current_target.to_terminal_init = debug_to_terminal_init;
4869 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4870 current_target.to_terminal_ours_for_output
4871 = debug_to_terminal_ours_for_output;
4872 current_target.to_terminal_ours = debug_to_terminal_ours;
4873 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4874 current_target.to_terminal_info = debug_to_terminal_info;
4875 current_target.to_load = debug_to_load;
4876 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4877 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4878 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4879 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4880 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4881 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4882 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4883 current_target.to_has_exited = debug_to_has_exited;
4884 current_target.to_can_run = debug_to_can_run;
4885 current_target.to_stop = debug_to_stop;
4886 current_target.to_rcmd = debug_to_rcmd;
4887 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4888 current_target.to_thread_architecture = debug_to_thread_architecture;
4889 }
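/* Editor's note (not original code): setup_target_debug is used when
   target debugging ("set debug target") is enabled.  It saves a copy
   of the current vector in debug_target and replaces selected
   current_target methods with the debug_to_* wrappers above; each
   wrapper calls the saved method and then logs the call and result
   to gdb_stdlog, e.g.

     target_insert_breakpoint (0x400526, xxx) = 0

   where the address and return value are illustrative only.  */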
4890 \f
4891
4892 static char targ_desc[] =
4893 "Names of targets and files being debugged.\nShows the entire \
4894 stack of targets currently in use (including the exec-file,\n\
4895 core-file, and process, if any), as well as the symbol file name.";
4896
4897 static void
4898 do_monitor_command (char *cmd,
4899 int from_tty)
4900 {
4901 if ((current_target.to_rcmd
4902 == (void (*) (char *, struct ui_file *)) tcomplain)
4903 || (current_target.to_rcmd == debug_to_rcmd
4904 && (debug_target.to_rcmd
4905 == (void (*) (char *, struct ui_file *)) tcomplain)))
4906 error (_("\"monitor\" command not supported by this target."));
4907 target_rcmd (cmd, gdb_stdtarg);
4908 }
4909
4910 /* Print the name of each layer of our target stack. */
4911
4912 static void
4913 maintenance_print_target_stack (char *cmd, int from_tty)
4914 {
4915 struct target_ops *t;
4916
4917 printf_filtered (_("The current target stack is:\n"));
4918
4919 for (t = target_stack; t != NULL; t = t->beneath)
4920 {
4921 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4922 }
4923 }
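/* Example output (editor's addition; the layer names depend on what
   is pushed at the time, "exec" and the dummy "None" target are just
   plausible examples):

     (gdb) maintenance print target-stack
     The current target stack is:
      - exec (Local exec file)
      - None (None)

   Each line shows a layer's to_shortname and to_longname.  */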
4924
4925 /* Controls whether async mode is permitted. */
4926 int target_async_permitted = 0;
4927
4928 /* The set command writes to this variable. If the inferior is
4929 executing, target_async_permitted is *not* updated. */
4930 static int target_async_permitted_1 = 0;
4931
4932 static void
4933 set_target_async_command (char *args, int from_tty,
4934 struct cmd_list_element *c)
4935 {
4936 if (have_live_inferiors ())
4937 {
4938 target_async_permitted_1 = target_async_permitted;
4939 error (_("Cannot change this setting while the inferior is running."));
4940 }
4941
4942 target_async_permitted = target_async_permitted_1;
4943 }
4944
4945 static void
4946 show_target_async_command (struct ui_file *file, int from_tty,
4947 struct cmd_list_element *c,
4948 const char *value)
4949 {
4950 fprintf_filtered (file,
4951 _("Controlling the inferior in "
4952 "asynchronous mode is %s.\n"), value);
4953 }
4954
4955 /* Temporary copies of permission settings. */
4956
4957 static int may_write_registers_1 = 1;
4958 static int may_write_memory_1 = 1;
4959 static int may_insert_breakpoints_1 = 1;
4960 static int may_insert_tracepoints_1 = 1;
4961 static int may_insert_fast_tracepoints_1 = 1;
4962 static int may_stop_1 = 1;
4963
4964 /* Make the user-set values match the real values again. */
4965
4966 void
4967 update_target_permissions (void)
4968 {
4969 may_write_registers_1 = may_write_registers;
4970 may_write_memory_1 = may_write_memory;
4971 may_insert_breakpoints_1 = may_insert_breakpoints;
4972 may_insert_tracepoints_1 = may_insert_tracepoints;
4973 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4974 may_stop_1 = may_stop;
4975 }
4976
4977 /* This one function handles (most of) the permission flags in the
4978 same way. */
4979
4980 static void
4981 set_target_permissions (char *args, int from_tty,
4982 struct cmd_list_element *c)
4983 {
4984 if (target_has_execution)
4985 {
4986 update_target_permissions ();
4987 error (_("Cannot change this setting while the inferior is running."));
4988 }
4989
4990 /* Make the real values match the user-changed values. */
4991 may_write_registers = may_write_registers_1;
4992 may_insert_breakpoints = may_insert_breakpoints_1;
4993 may_insert_tracepoints = may_insert_tracepoints_1;
4994 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4995 may_stop = may_stop_1;
4996 update_observer_mode ();
4997 }
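/* Usage sketch (editor's addition): the commands registered for this
   callback in initialize_targets below revoke individual permissions
   while no inferior is running, e.g.

     (gdb) set may-write-registers off
     (gdb) set may-insert-breakpoints off

   After that, register writes and breakpoint insertion are refused;
   "set may-write-memory" is handled separately by
   set_write_memory_permission so it can be changed independently of
   observer mode.  */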
4998
4999 /* Set memory write permission independently of observer mode. */
5000
5001 static void
5002 set_write_memory_permission (char *args, int from_tty,
5003 struct cmd_list_element *c)
5004 {
5005 /* Make the real values match the user-changed values. */
5006 may_write_memory = may_write_memory_1;
5007 update_observer_mode ();
5008 }
5009
5010
5011 void
5012 initialize_targets (void)
5013 {
5014 init_dummy_target ();
5015 push_target (&dummy_target);
5016
5017 add_info ("target", target_info, targ_desc);
5018 add_info ("files", target_info, targ_desc);
5019
5020 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5021 Set target debugging."), _("\
5022 Show target debugging."), _("\
5023 When non-zero, target debugging is enabled. Higher numbers are more\n\
5024 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5025 command."),
5026 NULL,
5027 show_targetdebug,
5028 &setdebuglist, &showdebuglist);
5029
5030 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5031 &trust_readonly, _("\
5032 Set mode for reading from readonly sections."), _("\
5033 Show mode for reading from readonly sections."), _("\
5034 When this mode is on, memory reads from readonly sections (such as .text)\n\
5035 will be read from the object file instead of from the target. This will\n\
5036 result in significant performance improvement for remote targets."),
5037 NULL,
5038 show_trust_readonly,
5039 &setlist, &showlist);
5040
5041 add_com ("monitor", class_obscure, do_monitor_command,
5042 _("Send a command to the remote monitor (remote targets only)."));
5043
5044 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5045 _("Print the name of each layer of the internal target stack."),
5046 &maintenanceprintlist);
5047
5048 add_setshow_boolean_cmd ("target-async", no_class,
5049 &target_async_permitted_1, _("\
5050 Set whether gdb controls the inferior in asynchronous mode."), _("\
5051 Show whether gdb controls the inferior in asynchronous mode."), _("\
5052 Tells gdb whether to control the inferior in asynchronous mode."),
5053 set_target_async_command,
5054 show_target_async_command,
5055 &setlist,
5056 &showlist);
5057
5058 add_setshow_boolean_cmd ("stack-cache", class_support,
5059 &stack_cache_enabled_p_1, _("\
5060 Set cache use for stack access."), _("\
5061 Show cache use for stack access."), _("\
5062 When on, use the data cache for all stack access, regardless of any\n\
5063 configured memory regions. This improves remote performance significantly.\n\
5064 By default, caching for stack access is on."),
5065 set_stack_cache_enabled_p,
5066 show_stack_cache_enabled_p,
5067 &setlist, &showlist);
5068
5069 add_setshow_boolean_cmd ("may-write-registers", class_support,
5070 &may_write_registers_1, _("\
5071 Set permission to write into registers."), _("\
5072 Show permission to write into registers."), _("\
5073 When this permission is on, GDB may write into the target's registers.\n\
5074 Otherwise, any sort of write attempt will result in an error."),
5075 set_target_permissions, NULL,
5076 &setlist, &showlist);
5077
5078 add_setshow_boolean_cmd ("may-write-memory", class_support,
5079 &may_write_memory_1, _("\
5080 Set permission to write into target memory."), _("\
5081 Show permission to write into target memory."), _("\
5082 When this permission is on, GDB may write into the target's memory.\n\
5083 Otherwise, any sort of write attempt will result in an error."),
5084 set_write_memory_permission, NULL,
5085 &setlist, &showlist);
5086
5087 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5088 &may_insert_breakpoints_1, _("\
5089 Set permission to insert breakpoints in the target."), _("\
5090 Show permission to insert breakpoints in the target."), _("\
5091 When this permission is on, GDB may insert breakpoints in the program.\n\
5092 Otherwise, any sort of insertion attempt will result in an error."),
5093 set_target_permissions, NULL,
5094 &setlist, &showlist);
5095
5096 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5097 &may_insert_tracepoints_1, _("\
5098 Set permission to insert tracepoints in the target."), _("\
5099 Show permission to insert tracepoints in the target."), _("\
5100 When this permission is on, GDB may insert tracepoints in the program.\n\
5101 Otherwise, any sort of insertion attempt will result in an error."),
5102 set_target_permissions, NULL,
5103 &setlist, &showlist);
5104
5105 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5106 &may_insert_fast_tracepoints_1, _("\
5107 Set permission to insert fast tracepoints in the target."), _("\
5108 Show permission to insert fast tracepoints in the target."), _("\
5109 When this permission is on, GDB may insert fast tracepoints.\n\
5110 Otherwise, any sort of insertion attempt will result in an error."),
5111 set_target_permissions, NULL,
5112 &setlist, &showlist);
5113
5114 add_setshow_boolean_cmd ("may-interrupt", class_support,
5115 &may_stop_1, _("\
5116 Set permission to interrupt or signal the target."), _("\
5117 Show permission to interrupt or signal the target."), _("\
5118 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5119 Otherwise, any attempt to interrupt or stop will be ignored."),
5120 set_target_permissions, NULL,
5121 &setlist, &showlist);
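/* Illustrative commands (editor's addition): each
   add_setshow_boolean_cmd call above registers a "set"/"show" pair
   under the given name, e.g.

     (gdb) set may-interrupt off
     (gdb) show may-interrupt

   With may-interrupt off, target_stop above only warns and returns
   instead of stopping the target.  */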
5122
5123
5124 target_dcache = dcache_init ();
5125 }