Change to_xfer_partial 'len' type to ULONGEST.
gdb/target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 static void *return_null (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static target_xfer_partial_ftype default_xfer_partial;
76
77 static target_xfer_partial_ftype current_xfer_partial;
78
79 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
80 ptid_t ptid);
81
82 static void init_dummy_target (void);
83
84 static struct target_ops debug_target;
85
86 static void debug_to_open (char *, int);
87
88 static void debug_to_prepare_to_store (struct regcache *);
89
90 static void debug_to_files_info (struct target_ops *);
91
92 static int debug_to_insert_breakpoint (struct gdbarch *,
93 struct bp_target_info *);
94
95 static int debug_to_remove_breakpoint (struct gdbarch *,
96 struct bp_target_info *);
97
98 static int debug_to_can_use_hw_breakpoint (int, int, int);
99
100 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
101 struct bp_target_info *);
102
103 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
107 struct expression *);
108
109 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
110 struct expression *);
111
112 static int debug_to_stopped_by_watchpoint (void);
113
114 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
115
116 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
117 CORE_ADDR, CORE_ADDR, int);
118
119 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
120
121 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
122 struct expression *);
123
124 static void debug_to_terminal_init (void);
125
126 static void debug_to_terminal_inferior (void);
127
128 static void debug_to_terminal_ours_for_output (void);
129
130 static void debug_to_terminal_save_ours (void);
131
132 static void debug_to_terminal_ours (void);
133
134 static void debug_to_load (char *, int);
135
136 static int debug_to_can_run (void);
137
138 static void debug_to_stop (ptid_t);
139
140 /* Pointer to the array of target architecture structures; the number
141 of entries currently in use; and the allocated size of the
142 array. */
143 struct target_ops **target_structs;
144 unsigned target_struct_size;
145 unsigned target_struct_allocsize;
146 #define DEFAULT_ALLOCSIZE 10
147
148 /* The initial current target, so that there is always a semi-valid
149 current target. */
150
151 static struct target_ops dummy_target;
152
153 /* Top of target stack. */
154
155 static struct target_ops *target_stack;
156
157 /* The target structure we are currently using to talk to a process
158 or file or whatever "inferior" we have. */
159
160 struct target_ops current_target;
161
162 /* Command list for target. */
163
164 static struct cmd_list_element *targetlist = NULL;
165
166 /* Nonzero if we should trust readonly sections from the
167 executable when reading memory. */
168
169 static int trust_readonly = 0;
170
171 /* Nonzero if we should show true memory content including
172 memory breakpoints inserted by gdb. */
173
174 static int show_memory_breakpoints = 0;
175
176 /* These globals control whether GDB attempts to perform these
177 operations; they are useful for targets that need to prevent
178 inadvertent disruption, such as in non-stop mode. */
179
180 int may_write_registers = 1;
181
182 int may_write_memory = 1;
183
184 int may_insert_breakpoints = 1;
185
186 int may_insert_tracepoints = 1;
187
188 int may_insert_fast_tracepoints = 1;
189
190 int may_stop = 1;
191
192 /* Nonzero if we want to see tracing of target-level operations. */
193
194 static unsigned int targetdebug = 0;
195 static void
196 show_targetdebug (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
200 }
201
202 static void setup_target_debug (void);
203
204 /* The user just typed 'target' without the name of a target. */
205
206 static void
207 target_command (char *arg, int from_tty)
208 {
209 fputs_filtered ("Argument required (target name). Try `help target'\n",
210 gdb_stdout);
211 }
212
213 /* Default target_has_* methods for process_stratum targets. */
214
215 int
216 default_child_has_all_memory (struct target_ops *ops)
217 {
218 /* If no inferior selected, then we can't read memory here. */
219 if (ptid_equal (inferior_ptid, null_ptid))
220 return 0;
221
222 return 1;
223 }
224
225 int
226 default_child_has_memory (struct target_ops *ops)
227 {
228 /* If no inferior selected, then we can't read memory here. */
229 if (ptid_equal (inferior_ptid, null_ptid))
230 return 0;
231
232 return 1;
233 }
234
235 int
236 default_child_has_stack (struct target_ops *ops)
237 {
238 /* If no inferior selected, there's no stack. */
239 if (ptid_equal (inferior_ptid, null_ptid))
240 return 0;
241
242 return 1;
243 }
244
245 int
246 default_child_has_registers (struct target_ops *ops)
247 {
248 /* Can't read registers from no inferior. */
249 if (ptid_equal (inferior_ptid, null_ptid))
250 return 0;
251
252 return 1;
253 }
254
255 int
256 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
257 {
258 /* If there's no thread selected, then we can't make it run through
259 hoops. */
260 if (ptid_equal (the_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266
267 int
268 target_has_all_memory_1 (void)
269 {
270 struct target_ops *t;
271
272 for (t = current_target.beneath; t != NULL; t = t->beneath)
273 if (t->to_has_all_memory (t))
274 return 1;
275
276 return 0;
277 }
278
279 int
280 target_has_memory_1 (void)
281 {
282 struct target_ops *t;
283
284 for (t = current_target.beneath; t != NULL; t = t->beneath)
285 if (t->to_has_memory (t))
286 return 1;
287
288 return 0;
289 }
290
291 int
292 target_has_stack_1 (void)
293 {
294 struct target_ops *t;
295
296 for (t = current_target.beneath; t != NULL; t = t->beneath)
297 if (t->to_has_stack (t))
298 return 1;
299
300 return 0;
301 }
302
303 int
304 target_has_registers_1 (void)
305 {
306 struct target_ops *t;
307
308 for (t = current_target.beneath; t != NULL; t = t->beneath)
309 if (t->to_has_registers (t))
310 return 1;
311
312 return 0;
313 }
314
315 int
316 target_has_execution_1 (ptid_t the_ptid)
317 {
318 struct target_ops *t;
319
320 for (t = current_target.beneath; t != NULL; t = t->beneath)
321 if (t->to_has_execution (t, the_ptid))
322 return 1;
323
324 return 0;
325 }
326
327 int
328 target_has_execution_current (void)
329 {
330 return target_has_execution_1 (inferior_ptid);
331 }
332
333 /* Complete initialization of T. This ensures that various fields in
334 T are set, if needed by the target implementation. */
335
336 void
337 complete_target_initialization (struct target_ops *t)
338 {
339 /* Provide default values for all "must have" methods. */
340 if (t->to_xfer_partial == NULL)
341 t->to_xfer_partial = default_xfer_partial;
342
343 if (t->to_has_all_memory == NULL)
344 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
345
346 if (t->to_has_memory == NULL)
347 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
348
349 if (t->to_has_stack == NULL)
350 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
351
352 if (t->to_has_registers == NULL)
353 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
354
355 if (t->to_has_execution == NULL)
356 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
357 }
358
359 /* Add possible target architecture T to the list and add a new
360 command 'target T->to_shortname'. Set COMPLETER as the command's
361 completer if not NULL. */
362
363 void
364 add_target_with_completer (struct target_ops *t,
365 completer_ftype *completer)
366 {
367 struct cmd_list_element *c;
368
369 complete_target_initialization (t);
370
371 if (!target_structs)
372 {
373 target_struct_allocsize = DEFAULT_ALLOCSIZE;
374 target_structs = (struct target_ops **) xmalloc
375 (target_struct_allocsize * sizeof (*target_structs));
376 }
377 if (target_struct_size >= target_struct_allocsize)
378 {
379 target_struct_allocsize *= 2;
380 target_structs = (struct target_ops **)
381 xrealloc ((char *) target_structs,
382 target_struct_allocsize * sizeof (*target_structs));
383 }
384 target_structs[target_struct_size++] = t;
385
386 if (targetlist == NULL)
387 add_prefix_cmd ("target", class_run, target_command, _("\
388 Connect to a target machine or process.\n\
389 The first argument is the type or protocol of the target machine.\n\
390 Remaining arguments are interpreted by the target protocol. For more\n\
391 information on the arguments for a particular protocol, type\n\
392 `help target ' followed by the protocol name."),
393 &targetlist, "target ", 0, &cmdlist);
394 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
395 &targetlist);
396 if (completer != NULL)
397 set_cmd_completer (c, completer);
398 }
399
400 /* Add a possible target architecture to the list. */
401
402 void
403 add_target (struct target_ops *t)
404 {
405 add_target_with_completer (t, NULL);
406 }
407
408 /* See target.h. */
409
410 void
411 add_deprecated_target_alias (struct target_ops *t, char *alias)
412 {
413 struct cmd_list_element *c;
414 char *alt;
415
416 /* If we use add_alias_cmd here, we do not get the deprecated warning;
417 see PR cli/15104. */
418 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
419 alt = xstrprintf ("target %s", t->to_shortname);
420 deprecate_cmd (c, alt);
421 }
422
423 /* Stub functions */
424
425 void
426 target_ignore (void)
427 {
428 }
429
430 void
431 target_kill (void)
432 {
433 struct target_ops *t;
434
435 for (t = current_target.beneath; t != NULL; t = t->beneath)
436 if (t->to_kill != NULL)
437 {
438 if (targetdebug)
439 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
440
441 t->to_kill (t);
442 return;
443 }
444
445 noprocess ();
446 }
447
448 void
449 target_load (char *arg, int from_tty)
450 {
451 target_dcache_invalidate ();
452 (*current_target.to_load) (arg, from_tty);
453 }
454
455 void
456 target_create_inferior (char *exec_file, char *args,
457 char **env, int from_tty)
458 {
459 struct target_ops *t;
460
461 for (t = current_target.beneath; t != NULL; t = t->beneath)
462 {
463 if (t->to_create_inferior != NULL)
464 {
465 t->to_create_inferior (t, exec_file, args, env, from_tty);
466 if (targetdebug)
467 fprintf_unfiltered (gdb_stdlog,
468 "target_create_inferior (%s, %s, xxx, %d)\n",
469 exec_file, args, from_tty);
470 return;
471 }
472 }
473
474 internal_error (__FILE__, __LINE__,
475 _("could not find a target to create inferior"));
476 }
477
478 void
479 target_terminal_inferior (void)
480 {
481 /* A background resume (``run&'') should leave GDB in control of the
482 terminal. Use target_can_async_p, not target_is_async_p, since at
483 this point the target is not async yet. However, if sync_execution
484 is not set, we know it will become async prior to resume. */
485 if (target_can_async_p () && !sync_execution)
486 return;
487
488 /* If GDB is resuming the inferior in the foreground, install
489 inferior's terminal modes. */
490 (*current_target.to_terminal_inferior) ();
491 }
492
493 static int
494 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
495 struct target_ops *t)
496 {
497 errno = EIO; /* Can't read/write this location. */
498 return 0; /* No bytes handled. */
499 }
500
501 static void
502 tcomplain (void)
503 {
504 error (_("You can't do that when your target is `%s'"),
505 current_target.to_shortname);
506 }
507
508 void
509 noprocess (void)
510 {
511 error (_("You can't do that without a process to debug."));
512 }
513
514 static void
515 default_terminal_info (const char *args, int from_tty)
516 {
517 printf_unfiltered (_("No saved terminal information.\n"));
518 }
519
520 /* A default implementation for the to_get_ada_task_ptid target method.
521
522 This function builds the PTID by using both LWP and TID as part of
523 the PTID lwp and tid elements. The pid used is the pid of the
524 inferior_ptid. */
525
526 static ptid_t
527 default_get_ada_task_ptid (long lwp, long tid)
528 {
529 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
530 }
531
532 static enum exec_direction_kind
533 default_execution_direction (void)
534 {
535 if (!target_can_execute_reverse)
536 return EXEC_FORWARD;
537 else if (!target_can_async_p ())
538 return EXEC_FORWARD;
539 else
540 gdb_assert_not_reached ("\
541 to_execution_direction must be implemented for reverse async");
542 }
543
544 /* Go through the target stack from top to bottom, copying over zero
545 entries in current_target, then filling in still empty entries. In
546 effect, we are doing class inheritance through the pushed target
547 vectors.
548
549 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
550 is currently implemented, is that it discards any knowledge of
551 which target an inherited method originally belonged to.
552 Consequently, new target methods should instead explicitly and
553 locally search the target stack for the target that can handle the
554 request. */
555
556 static void
557 update_current_target (void)
558 {
559 struct target_ops *t;
560
561 /* First, reset current's contents. */
562 memset (&current_target, 0, sizeof (current_target));
563
564 #define INHERIT(FIELD, TARGET) \
565 if (!current_target.FIELD) \
566 current_target.FIELD = (TARGET)->FIELD
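/* A minimal sketch of what one INHERIT use amounts to, for illustration
   only (to_files_info is just an example field picked from the list
   below): INHERIT (to_files_info, t) expands to

       if (!current_target.to_files_info)
         current_target.to_files_info = (t)->to_files_info;

   so the topmost pushed target that provides a method wins, and targets
   lower on the stack only fill in slots that are still NULL.  */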
567
568 for (t = target_stack; t; t = t->beneath)
569 {
570 INHERIT (to_shortname, t);
571 INHERIT (to_longname, t);
572 INHERIT (to_doc, t);
573 /* Do not inherit to_open. */
574 /* Do not inherit to_close. */
575 /* Do not inherit to_attach. */
576 INHERIT (to_post_attach, t);
577 INHERIT (to_attach_no_wait, t);
578 /* Do not inherit to_detach. */
579 /* Do not inherit to_disconnect. */
580 /* Do not inherit to_resume. */
581 /* Do not inherit to_wait. */
582 /* Do not inherit to_fetch_registers. */
583 /* Do not inherit to_store_registers. */
584 INHERIT (to_prepare_to_store, t);
585 INHERIT (deprecated_xfer_memory, t);
586 INHERIT (to_files_info, t);
587 INHERIT (to_insert_breakpoint, t);
588 INHERIT (to_remove_breakpoint, t);
589 INHERIT (to_can_use_hw_breakpoint, t);
590 INHERIT (to_insert_hw_breakpoint, t);
591 INHERIT (to_remove_hw_breakpoint, t);
592 /* Do not inherit to_ranged_break_num_registers. */
593 INHERIT (to_insert_watchpoint, t);
594 INHERIT (to_remove_watchpoint, t);
595 /* Do not inherit to_insert_mask_watchpoint. */
596 /* Do not inherit to_remove_mask_watchpoint. */
597 INHERIT (to_stopped_data_address, t);
598 INHERIT (to_have_steppable_watchpoint, t);
599 INHERIT (to_have_continuable_watchpoint, t);
600 INHERIT (to_stopped_by_watchpoint, t);
601 INHERIT (to_watchpoint_addr_within_range, t);
602 INHERIT (to_region_ok_for_hw_watchpoint, t);
603 INHERIT (to_can_accel_watchpoint_condition, t);
604 /* Do not inherit to_masked_watch_num_registers. */
605 INHERIT (to_terminal_init, t);
606 INHERIT (to_terminal_inferior, t);
607 INHERIT (to_terminal_ours_for_output, t);
608 INHERIT (to_terminal_ours, t);
609 INHERIT (to_terminal_save_ours, t);
610 INHERIT (to_terminal_info, t);
611 /* Do not inherit to_kill. */
612 INHERIT (to_load, t);
613 /* Do not inherit to_create_inferior. */
614 INHERIT (to_post_startup_inferior, t);
615 INHERIT (to_insert_fork_catchpoint, t);
616 INHERIT (to_remove_fork_catchpoint, t);
617 INHERIT (to_insert_vfork_catchpoint, t);
618 INHERIT (to_remove_vfork_catchpoint, t);
619 /* Do not inherit to_follow_fork. */
620 INHERIT (to_insert_exec_catchpoint, t);
621 INHERIT (to_remove_exec_catchpoint, t);
622 INHERIT (to_set_syscall_catchpoint, t);
623 INHERIT (to_has_exited, t);
624 /* Do not inherit to_mourn_inferior. */
625 INHERIT (to_can_run, t);
626 /* Do not inherit to_pass_signals. */
627 /* Do not inherit to_program_signals. */
628 /* Do not inherit to_thread_alive. */
629 /* Do not inherit to_find_new_threads. */
630 /* Do not inherit to_pid_to_str. */
631 INHERIT (to_extra_thread_info, t);
632 INHERIT (to_thread_name, t);
633 INHERIT (to_stop, t);
634 /* Do not inherit to_xfer_partial. */
635 INHERIT (to_rcmd, t);
636 INHERIT (to_pid_to_exec_file, t);
637 INHERIT (to_log_command, t);
638 INHERIT (to_stratum, t);
639 /* Do not inherit to_has_all_memory. */
640 /* Do not inherit to_has_memory. */
641 /* Do not inherit to_has_stack. */
642 /* Do not inherit to_has_registers. */
643 /* Do not inherit to_has_execution. */
644 INHERIT (to_has_thread_control, t);
645 INHERIT (to_can_async_p, t);
646 INHERIT (to_is_async_p, t);
647 INHERIT (to_async, t);
648 INHERIT (to_find_memory_regions, t);
649 INHERIT (to_make_corefile_notes, t);
650 INHERIT (to_get_bookmark, t);
651 INHERIT (to_goto_bookmark, t);
652 /* Do not inherit to_get_thread_local_address. */
653 INHERIT (to_can_execute_reverse, t);
654 INHERIT (to_execution_direction, t);
655 INHERIT (to_thread_architecture, t);
656 /* Do not inherit to_read_description. */
657 INHERIT (to_get_ada_task_ptid, t);
658 /* Do not inherit to_search_memory. */
659 INHERIT (to_supports_multi_process, t);
660 INHERIT (to_supports_enable_disable_tracepoint, t);
661 INHERIT (to_supports_string_tracing, t);
662 INHERIT (to_trace_init, t);
663 INHERIT (to_download_tracepoint, t);
664 INHERIT (to_can_download_tracepoint, t);
665 INHERIT (to_download_trace_state_variable, t);
666 INHERIT (to_enable_tracepoint, t);
667 INHERIT (to_disable_tracepoint, t);
668 INHERIT (to_trace_set_readonly_regions, t);
669 INHERIT (to_trace_start, t);
670 INHERIT (to_get_trace_status, t);
671 INHERIT (to_get_tracepoint_status, t);
672 INHERIT (to_trace_stop, t);
673 INHERIT (to_trace_find, t);
674 INHERIT (to_get_trace_state_variable_value, t);
675 INHERIT (to_save_trace_data, t);
676 INHERIT (to_upload_tracepoints, t);
677 INHERIT (to_upload_trace_state_variables, t);
678 INHERIT (to_get_raw_trace_data, t);
679 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
680 INHERIT (to_set_disconnected_tracing, t);
681 INHERIT (to_set_circular_trace_buffer, t);
682 INHERIT (to_set_trace_buffer_size, t);
683 INHERIT (to_set_trace_notes, t);
684 INHERIT (to_get_tib_address, t);
685 INHERIT (to_set_permissions, t);
686 INHERIT (to_static_tracepoint_marker_at, t);
687 INHERIT (to_static_tracepoint_markers_by_strid, t);
688 INHERIT (to_traceframe_info, t);
689 INHERIT (to_use_agent, t);
690 INHERIT (to_can_use_agent, t);
691 INHERIT (to_augmented_libraries_svr4_read, t);
692 INHERIT (to_magic, t);
693 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
694 INHERIT (to_can_run_breakpoint_commands, t);
695 /* Do not inherit to_memory_map. */
696 /* Do not inherit to_flash_erase. */
697 /* Do not inherit to_flash_done. */
698 }
699 #undef INHERIT
700
701 /* Clean up a target struct so it no longer has any zero pointers in
702 it. Some entries are defaulted to a method that prints an error;
703 others are hard-wired to a standard recursive default. */
704
705 #define de_fault(field, value) \
706 if (!current_target.field) \
707 current_target.field = value
708
709 de_fault (to_open,
710 (void (*) (char *, int))
711 tcomplain);
712 de_fault (to_close,
713 (void (*) (void))
714 target_ignore);
715 de_fault (to_post_attach,
716 (void (*) (int))
717 target_ignore);
718 de_fault (to_prepare_to_store,
719 (void (*) (struct regcache *))
720 noprocess);
721 de_fault (deprecated_xfer_memory,
722 (int (*) (CORE_ADDR, gdb_byte *, int, int,
723 struct mem_attrib *, struct target_ops *))
724 nomemory);
725 de_fault (to_files_info,
726 (void (*) (struct target_ops *))
727 target_ignore);
728 de_fault (to_insert_breakpoint,
729 memory_insert_breakpoint);
730 de_fault (to_remove_breakpoint,
731 memory_remove_breakpoint);
732 de_fault (to_can_use_hw_breakpoint,
733 (int (*) (int, int, int))
734 return_zero);
735 de_fault (to_insert_hw_breakpoint,
736 (int (*) (struct gdbarch *, struct bp_target_info *))
737 return_minus_one);
738 de_fault (to_remove_hw_breakpoint,
739 (int (*) (struct gdbarch *, struct bp_target_info *))
740 return_minus_one);
741 de_fault (to_insert_watchpoint,
742 (int (*) (CORE_ADDR, int, int, struct expression *))
743 return_minus_one);
744 de_fault (to_remove_watchpoint,
745 (int (*) (CORE_ADDR, int, int, struct expression *))
746 return_minus_one);
747 de_fault (to_stopped_by_watchpoint,
748 (int (*) (void))
749 return_zero);
750 de_fault (to_stopped_data_address,
751 (int (*) (struct target_ops *, CORE_ADDR *))
752 return_zero);
753 de_fault (to_watchpoint_addr_within_range,
754 default_watchpoint_addr_within_range);
755 de_fault (to_region_ok_for_hw_watchpoint,
756 default_region_ok_for_hw_watchpoint);
757 de_fault (to_can_accel_watchpoint_condition,
758 (int (*) (CORE_ADDR, int, int, struct expression *))
759 return_zero);
760 de_fault (to_terminal_init,
761 (void (*) (void))
762 target_ignore);
763 de_fault (to_terminal_inferior,
764 (void (*) (void))
765 target_ignore);
766 de_fault (to_terminal_ours_for_output,
767 (void (*) (void))
768 target_ignore);
769 de_fault (to_terminal_ours,
770 (void (*) (void))
771 target_ignore);
772 de_fault (to_terminal_save_ours,
773 (void (*) (void))
774 target_ignore);
775 de_fault (to_terminal_info,
776 default_terminal_info);
777 de_fault (to_load,
778 (void (*) (char *, int))
779 tcomplain);
780 de_fault (to_post_startup_inferior,
781 (void (*) (ptid_t))
782 target_ignore);
783 de_fault (to_insert_fork_catchpoint,
784 (int (*) (int))
785 return_one);
786 de_fault (to_remove_fork_catchpoint,
787 (int (*) (int))
788 return_one);
789 de_fault (to_insert_vfork_catchpoint,
790 (int (*) (int))
791 return_one);
792 de_fault (to_remove_vfork_catchpoint,
793 (int (*) (int))
794 return_one);
795 de_fault (to_insert_exec_catchpoint,
796 (int (*) (int))
797 return_one);
798 de_fault (to_remove_exec_catchpoint,
799 (int (*) (int))
800 return_one);
801 de_fault (to_set_syscall_catchpoint,
802 (int (*) (int, int, int, int, int *))
803 return_one);
804 de_fault (to_has_exited,
805 (int (*) (int, int, int *))
806 return_zero);
807 de_fault (to_can_run,
808 return_zero);
809 de_fault (to_extra_thread_info,
810 (char *(*) (struct thread_info *))
811 return_null);
812 de_fault (to_thread_name,
813 (char *(*) (struct thread_info *))
814 return_null);
815 de_fault (to_stop,
816 (void (*) (ptid_t))
817 target_ignore);
818 current_target.to_xfer_partial = current_xfer_partial;
819 de_fault (to_rcmd,
820 (void (*) (char *, struct ui_file *))
821 tcomplain);
822 de_fault (to_pid_to_exec_file,
823 (char *(*) (int))
824 return_null);
825 de_fault (to_async,
826 (void (*) (void (*) (enum inferior_event_type, void*), void*))
827 tcomplain);
828 de_fault (to_thread_architecture,
829 default_thread_architecture);
830 current_target.to_read_description = NULL;
831 de_fault (to_get_ada_task_ptid,
832 (ptid_t (*) (long, long))
833 default_get_ada_task_ptid);
834 de_fault (to_supports_multi_process,
835 (int (*) (void))
836 return_zero);
837 de_fault (to_supports_enable_disable_tracepoint,
838 (int (*) (void))
839 return_zero);
840 de_fault (to_supports_string_tracing,
841 (int (*) (void))
842 return_zero);
843 de_fault (to_trace_init,
844 (void (*) (void))
845 tcomplain);
846 de_fault (to_download_tracepoint,
847 (void (*) (struct bp_location *))
848 tcomplain);
849 de_fault (to_can_download_tracepoint,
850 (int (*) (void))
851 return_zero);
852 de_fault (to_download_trace_state_variable,
853 (void (*) (struct trace_state_variable *))
854 tcomplain);
855 de_fault (to_enable_tracepoint,
856 (void (*) (struct bp_location *))
857 tcomplain);
858 de_fault (to_disable_tracepoint,
859 (void (*) (struct bp_location *))
860 tcomplain);
861 de_fault (to_trace_set_readonly_regions,
862 (void (*) (void))
863 tcomplain);
864 de_fault (to_trace_start,
865 (void (*) (void))
866 tcomplain);
867 de_fault (to_get_trace_status,
868 (int (*) (struct trace_status *))
869 return_minus_one);
870 de_fault (to_get_tracepoint_status,
871 (void (*) (struct breakpoint *, struct uploaded_tp *))
872 tcomplain);
873 de_fault (to_trace_stop,
874 (void (*) (void))
875 tcomplain);
876 de_fault (to_trace_find,
877 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
878 return_minus_one);
879 de_fault (to_get_trace_state_variable_value,
880 (int (*) (int, LONGEST *))
881 return_zero);
882 de_fault (to_save_trace_data,
883 (int (*) (const char *))
884 tcomplain);
885 de_fault (to_upload_tracepoints,
886 (int (*) (struct uploaded_tp **))
887 return_zero);
888 de_fault (to_upload_trace_state_variables,
889 (int (*) (struct uploaded_tsv **))
890 return_zero);
891 de_fault (to_get_raw_trace_data,
892 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
893 tcomplain);
894 de_fault (to_get_min_fast_tracepoint_insn_len,
895 (int (*) (void))
896 return_minus_one);
897 de_fault (to_set_disconnected_tracing,
898 (void (*) (int))
899 target_ignore);
900 de_fault (to_set_circular_trace_buffer,
901 (void (*) (int))
902 target_ignore);
903 de_fault (to_set_trace_buffer_size,
904 (void (*) (LONGEST))
905 target_ignore);
906 de_fault (to_set_trace_notes,
907 (int (*) (const char *, const char *, const char *))
908 return_zero);
909 de_fault (to_get_tib_address,
910 (int (*) (ptid_t, CORE_ADDR *))
911 tcomplain);
912 de_fault (to_set_permissions,
913 (void (*) (void))
914 target_ignore);
915 de_fault (to_static_tracepoint_marker_at,
916 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
917 return_zero);
918 de_fault (to_static_tracepoint_markers_by_strid,
919 (VEC(static_tracepoint_marker_p) * (*) (const char *))
920 tcomplain);
921 de_fault (to_traceframe_info,
922 (struct traceframe_info * (*) (void))
923 return_null);
924 de_fault (to_supports_evaluation_of_breakpoint_conditions,
925 (int (*) (void))
926 return_zero);
927 de_fault (to_can_run_breakpoint_commands,
928 (int (*) (void))
929 return_zero);
930 de_fault (to_use_agent,
931 (int (*) (int))
932 tcomplain);
933 de_fault (to_can_use_agent,
934 (int (*) (void))
935 return_zero);
936 de_fault (to_augmented_libraries_svr4_read,
937 (int (*) (void))
938 return_zero);
939 de_fault (to_execution_direction, default_execution_direction);
940
941 #undef de_fault
942
943 /* Finally, position the target-stack beneath the squashed
944 "current_target". That way code looking for a non-inherited
945 target method can quickly and simply find it. */
946 current_target.beneath = target_stack;
947
948 if (targetdebug)
949 setup_target_debug ();
950 }
951
952 /* Push a new target type into the stack of the existing target accessors,
953 possibly superseding some of the existing accessors.
954
955 Rather than allow an empty stack, we always have the dummy target at
956 the bottom stratum, so we can call the function vectors without
957 checking them. */
958
959 void
960 push_target (struct target_ops *t)
961 {
962 struct target_ops **cur;
963
964 /* Check magic number. If wrong, it probably means someone changed
965 the struct definition, but not all the places that initialize one. */
966 if (t->to_magic != OPS_MAGIC)
967 {
968 fprintf_unfiltered (gdb_stderr,
969 "Magic number of %s target struct wrong\n",
970 t->to_shortname);
971 internal_error (__FILE__, __LINE__,
972 _("failed internal consistency check"));
973 }
974
975 /* Find the proper stratum to install this target in. */
976 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
977 {
978 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
979 break;
980 }
981
982 /* If there are already targets at this stratum, remove them. */
983 /* FIXME: cagney/2003-10-15: I think this should be popping all
984 targets to CUR, and not just those at this stratum level. */
985 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
986 {
987 /* There's already something at this stratum level. Close it,
988 and un-hook it from the stack. */
989 struct target_ops *tmp = (*cur);
990
991 (*cur) = (*cur)->beneath;
992 tmp->beneath = NULL;
993 target_close (tmp);
994 }
995
996 /* We have removed all targets in our stratum, now add the new one. */
997 t->beneath = (*cur);
998 (*cur) = t;
999
1000 update_current_target ();
1001 }
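/* A minimal sketch (hypothetical names, for illustration only) of how a
   backend typically uses this registration machinery: fill in a
   target_ops, register it with add_target so that "target demo" becomes
   a command, and install it on the stack from its to_open routine via
   push_target.  "demo_ops", "demo_open" and "init_demo_ops" are made-up
   identifiers, not part of GDB.  */
#if 0
static struct target_ops demo_ops;

static void
demo_open (char *args, int from_tty)
{
  push_target (&demo_ops);
}

static void
init_demo_ops (void)
{
  demo_ops.to_shortname = "demo";
  demo_ops.to_longname = "Demonstration target";
  demo_ops.to_doc = "Illustration of add_target/push_target usage.";
  demo_ops.to_open = demo_open;
  demo_ops.to_stratum = process_stratum;
  demo_ops.to_magic = OPS_MAGIC;
  add_target (&demo_ops);
}
#endif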
1002
1003 /* Remove a target_ops vector from the stack, wherever it may be.
1004 Return how many times it was removed (0 or 1). */
1005
1006 int
1007 unpush_target (struct target_ops *t)
1008 {
1009 struct target_ops **cur;
1010 struct target_ops *tmp;
1011
1012 if (t->to_stratum == dummy_stratum)
1013 internal_error (__FILE__, __LINE__,
1014 _("Attempt to unpush the dummy target"));
1015
1016 /* Look for the specified target. Note that we assume that a target
1017 can only occur once in the target stack. */
1018
1019 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1020 {
1021 if ((*cur) == t)
1022 break;
1023 }
1024
1025 /* If we don't find target_ops, quit. Only open targets should be
1026 closed. */
1027 if ((*cur) == NULL)
1028 return 0;
1029
1030 /* Unchain the target. */
1031 tmp = (*cur);
1032 (*cur) = (*cur)->beneath;
1033 tmp->beneath = NULL;
1034
1035 update_current_target ();
1036
1037 /* Finally close the target. Note we do this after unchaining, so
1038 any target method calls from within the target_close
1039 implementation don't end up in T anymore. */
1040 target_close (t);
1041
1042 return 1;
1043 }
1044
1045 void
1046 pop_all_targets_above (enum strata above_stratum)
1047 {
1048 while ((int) (current_target.to_stratum) > (int) above_stratum)
1049 {
1050 if (!unpush_target (target_stack))
1051 {
1052 fprintf_unfiltered (gdb_stderr,
1053 "pop_all_targets couldn't find target %s\n",
1054 target_stack->to_shortname);
1055 internal_error (__FILE__, __LINE__,
1056 _("failed internal consistency check"));
1057 break;
1058 }
1059 }
1060 }
1061
1062 void
1063 pop_all_targets (void)
1064 {
1065 pop_all_targets_above (dummy_stratum);
1066 }
1067
1068 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1069
1070 int
1071 target_is_pushed (struct target_ops *t)
1072 {
1073 struct target_ops **cur;
1074
1075 /* Check magic number. If wrong, it probably means someone changed
1076 the struct definition, but not all the places that initialize one. */
1077 if (t->to_magic != OPS_MAGIC)
1078 {
1079 fprintf_unfiltered (gdb_stderr,
1080 "Magic number of %s target struct wrong\n",
1081 t->to_shortname);
1082 internal_error (__FILE__, __LINE__,
1083 _("failed internal consistency check"));
1084 }
1085
1086 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1087 if (*cur == t)
1088 return 1;
1089
1090 return 0;
1091 }
1092
1093 /* Using the objfile specified in OBJFILE, find the address for the
1094 current thread's thread-local storage with offset OFFSET. */
1095 CORE_ADDR
1096 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1097 {
1098 volatile CORE_ADDR addr = 0;
1099 struct target_ops *target;
1100
1101 for (target = current_target.beneath;
1102 target != NULL;
1103 target = target->beneath)
1104 {
1105 if (target->to_get_thread_local_address != NULL)
1106 break;
1107 }
1108
1109 if (target != NULL
1110 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1111 {
1112 ptid_t ptid = inferior_ptid;
1113 volatile struct gdb_exception ex;
1114
1115 TRY_CATCH (ex, RETURN_MASK_ALL)
1116 {
1117 CORE_ADDR lm_addr;
1118
1119 /* Fetch the load module address for this objfile. */
1120 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1121 objfile);
1122 /* If it's 0, throw the appropriate exception. */
1123 if (lm_addr == 0)
1124 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1125 _("TLS load module not found"));
1126
1127 addr = target->to_get_thread_local_address (target, ptid,
1128 lm_addr, offset);
1129 }
1130 /* If an error occurred, print TLS related messages here. Otherwise,
1131 throw the error to some higher catcher. */
1132 if (ex.reason < 0)
1133 {
1134 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1135
1136 switch (ex.error)
1137 {
1138 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1139 error (_("Cannot find thread-local variables "
1140 "in this thread library."));
1141 break;
1142 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1143 if (objfile_is_library)
1144 error (_("Cannot find shared library `%s' in dynamic"
1145 " linker's load module list"), objfile_name (objfile));
1146 else
1147 error (_("Cannot find executable file `%s' in dynamic"
1148 " linker's load module list"), objfile_name (objfile));
1149 break;
1150 case TLS_NOT_ALLOCATED_YET_ERROR:
1151 if (objfile_is_library)
1152 error (_("The inferior has not yet allocated storage for"
1153 " thread-local variables in\n"
1154 "the shared library `%s'\n"
1155 "for %s"),
1156 objfile_name (objfile), target_pid_to_str (ptid));
1157 else
1158 error (_("The inferior has not yet allocated storage for"
1159 " thread-local variables in\n"
1160 "the executable `%s'\n"
1161 "for %s"),
1162 objfile_name (objfile), target_pid_to_str (ptid));
1163 break;
1164 case TLS_GENERIC_ERROR:
1165 if (objfile_is_library)
1166 error (_("Cannot find thread-local storage for %s, "
1167 "shared library %s:\n%s"),
1168 target_pid_to_str (ptid),
1169 objfile_name (objfile), ex.message);
1170 else
1171 error (_("Cannot find thread-local storage for %s, "
1172 "executable file %s:\n%s"),
1173 target_pid_to_str (ptid),
1174 objfile_name (objfile), ex.message);
1175 break;
1176 default:
1177 throw_exception (ex);
1178 break;
1179 }
1180 }
1181 }
1182 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1183 TLS is an ABI-specific thing. But we don't do that yet. */
1184 else
1185 error (_("Cannot find thread-local variables on this target"));
1186
1187 return addr;
1188 }
1189
1190 const char *
1191 target_xfer_error_to_string (enum target_xfer_error err)
1192 {
1193 #define CASE(X) case X: return #X
1194 switch (err)
1195 {
1196 CASE(TARGET_XFER_E_IO);
1197 CASE(TARGET_XFER_E_UNAVAILABLE);
1198 default:
1199 return "<unknown>";
1200 }
1201 #undef CASE
1202 }
1203
1204
1205 #undef MIN
1206 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1207
1208 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1209 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
1210 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1211 is responsible for freeing it. Return the number of bytes successfully
1212 read. */
1213
1214 int
1215 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1216 {
1217 int tlen, offset, i;
1218 gdb_byte buf[4];
1219 int errcode = 0;
1220 char *buffer;
1221 int buffer_allocated;
1222 char *bufptr;
1223 unsigned int nbytes_read = 0;
1224
1225 gdb_assert (string);
1226
1227 /* Small for testing. */
1228 buffer_allocated = 4;
1229 buffer = xmalloc (buffer_allocated);
1230 bufptr = buffer;
1231
1232 while (len > 0)
1233 {
1234 tlen = MIN (len, 4 - (memaddr & 3));
1235 offset = memaddr & 3;
1236
1237 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1238 if (errcode != 0)
1239 {
1240 /* The transfer request might have crossed the boundary to an
1241 unallocated region of memory. Retry the transfer, requesting
1242 a single byte. */
1243 tlen = 1;
1244 offset = 0;
1245 errcode = target_read_memory (memaddr, buf, 1);
1246 if (errcode != 0)
1247 goto done;
1248 }
1249
1250 if (bufptr - buffer + tlen > buffer_allocated)
1251 {
1252 unsigned int bytes;
1253
1254 bytes = bufptr - buffer;
1255 buffer_allocated *= 2;
1256 buffer = xrealloc (buffer, buffer_allocated);
1257 bufptr = buffer + bytes;
1258 }
1259
1260 for (i = 0; i < tlen; i++)
1261 {
1262 *bufptr++ = buf[i + offset];
1263 if (buf[i + offset] == '\000')
1264 {
1265 nbytes_read += i + 1;
1266 goto done;
1267 }
1268 }
1269
1270 memaddr += tlen;
1271 len -= tlen;
1272 nbytes_read += tlen;
1273 }
1274 done:
1275 *string = buffer;
1276 if (errnop != NULL)
1277 *errnop = errcode;
1278 return nbytes_read;
1279 }
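/* A short usage sketch, illustration only ("addr" stands for a
   CORE_ADDR the caller already has): read at most 200 bytes of a
   NUL-terminated string from the inferior and check the error code.
   Note that *STRING is set even on error, so it must always be freed.  */
#if 0
{
  char *str;
  int err;
  int nread = target_read_string (addr, &str, 200, &err);

  if (err == 0)
    printf_filtered ("read %d bytes: %s\n", nread, str);
  xfree (str);
}
#endif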
1280
1281 struct target_section_table *
1282 target_get_section_table (struct target_ops *target)
1283 {
1284 struct target_ops *t;
1285
1286 if (targetdebug)
1287 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1288
1289 for (t = target; t != NULL; t = t->beneath)
1290 if (t->to_get_section_table != NULL)
1291 return (*t->to_get_section_table) (t);
1292
1293 return NULL;
1294 }
1295
1296 /* Find a section containing ADDR. */
1297
1298 struct target_section *
1299 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1300 {
1301 struct target_section_table *table = target_get_section_table (target);
1302 struct target_section *secp;
1303
1304 if (table == NULL)
1305 return NULL;
1306
1307 for (secp = table->sections; secp < table->sections_end; secp++)
1308 {
1309 if (addr >= secp->addr && addr < secp->endaddr)
1310 return secp;
1311 }
1312 return NULL;
1313 }
1314
1315 /* Read memory from the live target, even if currently inspecting a
1316 traceframe. The return is the same as that of target_read. */
1317
1318 static LONGEST
1319 target_read_live_memory (enum target_object object,
1320 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len)
1321 {
1322 LONGEST ret;
1323 struct cleanup *cleanup;
1324
1325 /* Switch momentarily out of tfind mode so as to access live memory.
1326 Note that this must not clear global state, such as the frame
1327 cache, which must still remain valid for the previous traceframe.
1328 We may be _building_ the frame cache at this point. */
1329 cleanup = make_cleanup_restore_traceframe_number ();
1330 set_traceframe_number (-1);
1331
1332 ret = target_read (current_target.beneath, object, NULL,
1333 myaddr, memaddr, len);
1334
1335 do_cleanups (cleanup);
1336 return ret;
1337 }
1338
1339 /* Using the set of read-only target sections of OPS, read live
1340 read-only memory. Note that the actual reads start from the
1341 top-most target again.
1342
1343 For interface/parameters/return description see target.h,
1344 to_xfer_partial. */
1345
1346 static LONGEST
1347 memory_xfer_live_readonly_partial (struct target_ops *ops,
1348 enum target_object object,
1349 gdb_byte *readbuf, ULONGEST memaddr,
1350 ULONGEST len)
1351 {
1352 struct target_section *secp;
1353 struct target_section_table *table;
1354
1355 secp = target_section_by_addr (ops, memaddr);
1356 if (secp != NULL
1357 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1358 secp->the_bfd_section)
1359 & SEC_READONLY))
1360 {
1361 struct target_section *p;
1362 ULONGEST memend = memaddr + len;
1363
1364 table = target_get_section_table (ops);
1365
1366 for (p = table->sections; p < table->sections_end; p++)
1367 {
1368 if (memaddr >= p->addr)
1369 {
1370 if (memend <= p->endaddr)
1371 {
1372 /* Entire transfer is within this section. */
1373 return target_read_live_memory (object, memaddr,
1374 readbuf, len);
1375 }
1376 else if (memaddr >= p->endaddr)
1377 {
1378 /* This section ends before the transfer starts. */
1379 continue;
1380 }
1381 else
1382 {
1383 /* This section overlaps the transfer. Just do half. */
1384 len = p->endaddr - memaddr;
1385 return target_read_live_memory (object, memaddr,
1386 readbuf, len);
1387 }
1388 }
1389 }
1390 }
1391
1392 return 0;
1393 }
1394
1395 /* Read memory from more than one valid target. A core file, for
1396 instance, could have some of memory but delegate other bits to
1397 the target below it. So, we must manually try all targets. */
1398
1399 static LONGEST
1400 raw_memory_xfer_partial (struct target_ops *ops, void *readbuf,
1401 const void *writebuf, ULONGEST memaddr, LONGEST len)
1402 {
1403 LONGEST res;
1404
1405 do
1406 {
1407 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1408 readbuf, writebuf, memaddr, len);
1409 if (res > 0)
1410 break;
1411
1412 /* We want to continue past core files to executables, but not
1413 past a running target's memory. */
1414 if (ops->to_has_all_memory (ops))
1415 break;
1416
1417 ops = ops->beneath;
1418 }
1419 while (ops != NULL);
1420
1421 return res;
1422 }
1423
1424 /* Perform a partial memory transfer.
1425 For docs see target.h, to_xfer_partial. */
1426
1427 static LONGEST
1428 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1429 void *readbuf, const void *writebuf, ULONGEST memaddr,
1430 ULONGEST len)
1431 {
1432 LONGEST res;
1433 int reg_len;
1434 struct mem_region *region;
1435 struct inferior *inf;
1436
1437 /* For accesses to unmapped overlay sections, read directly from
1438 files. Must do this first, as MEMADDR may need adjustment. */
1439 if (readbuf != NULL && overlay_debugging)
1440 {
1441 struct obj_section *section = find_pc_overlay (memaddr);
1442
1443 if (pc_in_unmapped_range (memaddr, section))
1444 {
1445 struct target_section_table *table
1446 = target_get_section_table (ops);
1447 const char *section_name = section->the_bfd_section->name;
1448
1449 memaddr = overlay_mapped_address (memaddr, section);
1450 return section_table_xfer_memory_partial (readbuf, writebuf,
1451 memaddr, len,
1452 table->sections,
1453 table->sections_end,
1454 section_name);
1455 }
1456 }
1457
1458 /* Try the executable files, if "trust-readonly-sections" is set. */
1459 if (readbuf != NULL && trust_readonly)
1460 {
1461 struct target_section *secp;
1462 struct target_section_table *table;
1463
1464 secp = target_section_by_addr (ops, memaddr);
1465 if (secp != NULL
1466 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1467 secp->the_bfd_section)
1468 & SEC_READONLY))
1469 {
1470 table = target_get_section_table (ops);
1471 return section_table_xfer_memory_partial (readbuf, writebuf,
1472 memaddr, len,
1473 table->sections,
1474 table->sections_end,
1475 NULL);
1476 }
1477 }
1478
1479 /* If reading unavailable memory in the context of traceframes, and
1480 this address falls within a read-only section, fall back to
1481 reading from live memory. */
1482 if (readbuf != NULL && get_traceframe_number () != -1)
1483 {
1484 VEC(mem_range_s) *available;
1485
1486 /* If we fail to get the set of available memory, then the
1487 target does not support querying traceframe info, and so we
1488 attempt reading from the traceframe anyway (assuming the
1489 target implements the old QTro packet then). */
1490 if (traceframe_available_memory (&available, memaddr, len))
1491 {
1492 struct cleanup *old_chain;
1493
1494 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1495
1496 if (VEC_empty (mem_range_s, available)
1497 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1498 {
1499 /* Don't read into the traceframe's available
1500 memory. */
1501 if (!VEC_empty (mem_range_s, available))
1502 {
1503 LONGEST oldlen = len;
1504
1505 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1506 gdb_assert (len <= oldlen);
1507 }
1508
1509 do_cleanups (old_chain);
1510
1511 /* This goes through the topmost target again. */
1512 res = memory_xfer_live_readonly_partial (ops, object,
1513 readbuf, memaddr, len);
1514 if (res > 0)
1515 return res;
1516
1517 /* No use trying further; we know some memory starting
1518 at MEMADDR isn't available. */
1519 return TARGET_XFER_E_UNAVAILABLE;
1520 }
1521
1522 /* Don't try to read more than is available, in
1523 case the target implements the deprecated QTro packet to
1524 cater for older GDBs (the target's knowledge of read-only
1525 sections may be outdated by now). */
1526 len = VEC_index (mem_range_s, available, 0)->length;
1527
1528 do_cleanups (old_chain);
1529 }
1530 }
1531
1532 /* Try GDB's internal data cache. */
1533 region = lookup_mem_region (memaddr);
1534 /* region->hi == 0 means there's no upper bound. */
1535 if (memaddr + len < region->hi || region->hi == 0)
1536 reg_len = len;
1537 else
1538 reg_len = region->hi - memaddr;
1539
1540 switch (region->attrib.mode)
1541 {
1542 case MEM_RO:
1543 if (writebuf != NULL)
1544 return -1;
1545 break;
1546
1547 case MEM_WO:
1548 if (readbuf != NULL)
1549 return -1;
1550 break;
1551
1552 case MEM_FLASH:
1553 /* We only support writing to flash during "load" for now. */
1554 if (writebuf != NULL)
1555 error (_("Writing to flash memory forbidden in this context"));
1556 break;
1557
1558 case MEM_NONE:
1559 return -1;
1560 }
1561
1562 if (!ptid_equal (inferior_ptid, null_ptid))
1563 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1564 else
1565 inf = NULL;
1566
1567 if (inf != NULL
1568 /* The dcache reads whole cache lines; that doesn't play well
1569 with reading from a trace buffer, because reading outside of
1570 the collected memory range fails. */
1571 && get_traceframe_number () == -1
1572 && (region->attrib.cache
1573 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1574 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1575 {
1576 DCACHE *dcache = target_dcache_get_or_init ();
1577
1578 if (readbuf != NULL)
1579 res = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1580 else
1581 /* FIXME drow/2006-08-09: If we're going to preserve const
1582 correctness dcache_xfer_memory should take readbuf and
1583 writebuf. */
1584 res = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1585 reg_len, 1);
1586 if (res <= 0)
1587 return -1;
1588 else
1589 return res;
1590 }
1591
1592 /* If none of those methods found the memory we wanted, fall back
1593 to a target partial transfer. Normally a single call to
1594 to_xfer_partial is enough; if it doesn't recognize an object
1595 it will call the to_xfer_partial of the next target down.
1596 But for memory this won't do. Memory is the only target
1597 object which can be read from more than one valid target. */
1598 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len);
1599
1600 /* Make sure the cache gets updated no matter what - if we are writing
1601 to the stack. Even if this write is not tagged as such, we still need
1602 to update the cache. */
1603
1604 if (res > 0
1605 && inf != NULL
1606 && writebuf != NULL
1607 && target_dcache_init_p ()
1608 && !region->attrib.cache
1609 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1610 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1611 {
1612 DCACHE *dcache = target_dcache_get ();
1613
1614 dcache_update (dcache, memaddr, (void *) writebuf, res);
1615 }
1616
1617 /* If we still haven't got anything, return the last error. We
1618 give up. */
1619 return res;
1620 }
1621
1622 /* Perform a partial memory transfer. For docs see target.h,
1623 to_xfer_partial. */
1624
1625 static LONGEST
1626 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1627 void *readbuf, const void *writebuf, ULONGEST memaddr,
1628 ULONGEST len)
1629 {
1630 int res;
1631
1632 /* Zero length requests are ok and require no work. */
1633 if (len == 0)
1634 return 0;
1635
1636 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1637 breakpoint insns, thus hiding out from higher layers whether
1638 there are software breakpoints inserted in the code stream. */
1639 if (readbuf != NULL)
1640 {
1641 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1642
1643 if (res > 0 && !show_memory_breakpoints)
1644 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1645 }
1646 else
1647 {
1648 void *buf;
1649 struct cleanup *old_chain;
1650
1651 /* A large write request is likely to be partially satisfied
1652 by memory_xfer_partial_1. We will continually malloc
1653 and free a copy of the entire write request for breakpoint
1654 shadow handling even though we only end up writing a small
1655 subset of it. Cap writes to 4KB to mitigate this. */
1656 len = min (4096, len);
1657
1658 buf = xmalloc (len);
1659 old_chain = make_cleanup (xfree, buf);
1660 memcpy (buf, writebuf, len);
1661
1662 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1663 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1664
1665 do_cleanups (old_chain);
1666 }
1667
1668 return res;
1669 }
1670
1671 static void
1672 restore_show_memory_breakpoints (void *arg)
1673 {
1674 show_memory_breakpoints = (uintptr_t) arg;
1675 }
1676
1677 struct cleanup *
1678 make_show_memory_breakpoints_cleanup (int show)
1679 {
1680 int current = show_memory_breakpoints;
1681
1682 show_memory_breakpoints = show;
1683 return make_cleanup (restore_show_memory_breakpoints,
1684 (void *) (uintptr_t) current);
1685 }
1686
1687 /* For docs see target.h, to_xfer_partial. */
1688
1689 LONGEST
1690 target_xfer_partial (struct target_ops *ops,
1691 enum target_object object, const char *annex,
1692 gdb_byte *readbuf, const gdb_byte *writebuf,
1693 ULONGEST offset, ULONGEST len)
1694 {
1695 LONGEST retval;
1696
1697 gdb_assert (ops->to_xfer_partial != NULL);
1698
1699 if (writebuf && !may_write_memory)
1700 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1701 core_addr_to_string_nz (offset), plongest (len));
1702
1703 /* If this is a memory transfer, let the memory-specific code
1704 have a look at it instead. Memory transfers are more
1705 complicated. */
1706 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1707 || object == TARGET_OBJECT_CODE_MEMORY)
1708 retval = memory_xfer_partial (ops, object, readbuf,
1709 writebuf, offset, len);
1710 else if (object == TARGET_OBJECT_RAW_MEMORY)
1711 {
1712 /* Request the normal memory object from other layers. */
1713 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len);
1714 }
1715 else
1716 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1717 writebuf, offset, len);
1718
1719 if (targetdebug)
1720 {
1721 const unsigned char *myaddr = NULL;
1722
1723 fprintf_unfiltered (gdb_stdlog,
1724 "%s:target_xfer_partial "
1725 "(%d, %s, %s, %s, %s, %s) = %s",
1726 ops->to_shortname,
1727 (int) object,
1728 (annex ? annex : "(null)"),
1729 host_address_to_string (readbuf),
1730 host_address_to_string (writebuf),
1731 core_addr_to_string_nz (offset),
1732 pulongest (len), plongest (retval));
1733
1734 if (readbuf)
1735 myaddr = readbuf;
1736 if (writebuf)
1737 myaddr = writebuf;
1738 if (retval > 0 && myaddr != NULL)
1739 {
1740 int i;
1741
1742 fputs_unfiltered (", bytes =", gdb_stdlog);
1743 for (i = 0; i < retval; i++)
1744 {
1745 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1746 {
1747 if (targetdebug < 2 && i > 0)
1748 {
1749 fprintf_unfiltered (gdb_stdlog, " ...");
1750 break;
1751 }
1752 fprintf_unfiltered (gdb_stdlog, "\n");
1753 }
1754
1755 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1756 }
1757 }
1758
1759 fputc_unfiltered ('\n', gdb_stdlog);
1760 }
1761 return retval;
1762 }
1763
1764 /* Read LEN bytes of target memory at address MEMADDR, placing the
1765 results in GDB's memory at MYADDR. Returns either 0 for success or
1766 a target_xfer_error value if any error occurs.
1767
1768 If an error occurs, no guarantee is made about the contents of the data at
1769 MYADDR. In particular, the caller should not depend upon partial reads
1770 filling the buffer with good data. There is no way for the caller to know
1771 how much good data might have been transferred anyway. Callers that can
1772 deal with partial reads should call target_read (which will retry until
1773 it makes no progress, and then return how much was transferred). */
1774
1775 int
1776 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1777 {
1778 /* Dispatch to the topmost target, not the flattened current_target.
1779 Memory accesses check target->to_has_(all_)memory, and the
1780 flattened target doesn't inherit those. */
1781 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1782 myaddr, memaddr, len) == len)
1783 return 0;
1784 else
1785 return TARGET_XFER_E_IO;
1786 }
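/* A short usage sketch, illustration only ("pc" stands for any
   CORE_ADDR the caller already has): read a 4-byte word and report a
   failure, as the comment above recommends for callers that cannot
   handle partial reads.  */
#if 0
{
  gdb_byte insn[4];

  if (target_read_memory (pc, insn, sizeof insn) != 0)
    error (_("Cannot read memory at %s"),
	   paddress (target_gdbarch (), pc));
}
#endif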
1787
1788 /* Like target_read_memory, but specify explicitly that this is a read
1789 from the target's raw memory. That is, this read bypasses the
1790 dcache, breakpoint shadowing, etc. */
1791
1792 int
1793 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1794 {
1795 /* See comment in target_read_memory about why the request starts at
1796 current_target.beneath. */
1797 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1798 myaddr, memaddr, len) == len)
1799 return 0;
1800 else
1801 return TARGET_XFER_E_IO;
1802 }
1803
1804 /* Like target_read_memory, but specify explicitly that this is a read from
1805 the target's stack. This may trigger different cache behavior. */
1806
1807 int
1808 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1809 {
1810 /* See comment in target_read_memory about why the request starts at
1811 current_target.beneath. */
1812 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1813 myaddr, memaddr, len) == len)
1814 return 0;
1815 else
1816 return TARGET_XFER_E_IO;
1817 }
1818
1819 /* Like target_read_memory, but specify explicitly that this is a read from
1820 the target's code. This may trigger different cache behavior. */
1821
1822 int
1823 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1824 {
1825 /* See comment in target_read_memory about why the request starts at
1826 current_target.beneath. */
1827 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1828 myaddr, memaddr, len) == len)
1829 return 0;
1830 else
1831 return TARGET_XFER_E_IO;
1832 }
1833
1834 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1835 Returns either 0 for success or a target_xfer_error value if any
1836 error occurs. If an error occurs, no guarantee is made about how
1837 much data got written. Callers that can deal with partial writes
1838 should call target_write. */
1839
1840 int
1841 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1842 {
1843 /* See comment in target_read_memory about why the request starts at
1844 current_target.beneath. */
1845 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1846 myaddr, memaddr, len) == len)
1847 return 0;
1848 else
1849 return TARGET_XFER_E_IO;
1850 }
1851
1852 /* Write LEN bytes from MYADDR to target raw memory at address
1853 MEMADDR. Returns either 0 for success or a target_xfer_error value
1854 if any error occurs. If an error occurs, no guarantee is made
1855 about how much data got written. Callers that can deal with
1856 partial writes should call target_write. */
1857
1858 int
1859 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1860 {
1861 /* See comment in target_read_memory about why the request starts at
1862 current_target.beneath. */
1863 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1864 myaddr, memaddr, len) == len)
1865 return 0;
1866 else
1867 return TARGET_XFER_E_IO;
1868 }
1869
1870 /* Fetch the target's memory map. */
1871
1872 VEC(mem_region_s) *
1873 target_memory_map (void)
1874 {
1875 VEC(mem_region_s) *result;
1876 struct mem_region *last_one, *this_one;
1877 int ix;
1878 struct target_ops *t;
1879
1880 if (targetdebug)
1881 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1882
1883 for (t = current_target.beneath; t != NULL; t = t->beneath)
1884 if (t->to_memory_map != NULL)
1885 break;
1886
1887 if (t == NULL)
1888 return NULL;
1889
1890 result = t->to_memory_map (t);
1891 if (result == NULL)
1892 return NULL;
1893
1894 qsort (VEC_address (mem_region_s, result),
1895 VEC_length (mem_region_s, result),
1896 sizeof (struct mem_region), mem_region_cmp);
1897
1898 /* Check that regions do not overlap. Simultaneously assign
1899 a numbering for the "mem" commands to use to refer to
1900 each region. */
1901 last_one = NULL;
1902 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1903 {
1904 this_one->number = ix;
1905
1906 if (last_one && last_one->hi > this_one->lo)
1907 {
1908 warning (_("Overlapping regions in memory map: ignoring"));
1909 VEC_free (mem_region_s, result);
1910 return NULL;
1911 }
1912 last_one = this_one;
1913 }
1914
1915 return result;
1916 }
1917
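/* A sketch of how a consumer might walk the sorted map returned above
   (illustrative only; the "mem" command machinery in memattr.c is the
   real consumer):

     VEC(mem_region_s) *map = target_memory_map ();
     struct mem_region *r;
     int ix;

     if (map != NULL)
       {
         for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
           printf_filtered ("%s-%s\n", hex_string (r->lo),
                            hex_string (r->hi));
         VEC_free (mem_region_s, map);
       }

   The caller owns the returned vector and must free it.  */
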
1918 void
1919 target_flash_erase (ULONGEST address, LONGEST length)
1920 {
1921 struct target_ops *t;
1922
1923 for (t = current_target.beneath; t != NULL; t = t->beneath)
1924 if (t->to_flash_erase != NULL)
1925 {
1926 if (targetdebug)
1927 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1928 hex_string (address), phex (length, 0));
1929 t->to_flash_erase (t, address, length);
1930 return;
1931 }
1932
1933 tcomplain ();
1934 }
1935
1936 void
1937 target_flash_done (void)
1938 {
1939 struct target_ops *t;
1940
1941 for (t = current_target.beneath; t != NULL; t = t->beneath)
1942 if (t->to_flash_done != NULL)
1943 {
1944 if (targetdebug)
1945 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1946 t->to_flash_done (t);
1947 return;
1948 }
1949
1950 tcomplain ();
1951 }
1952
1953 static void
1954 show_trust_readonly (struct ui_file *file, int from_tty,
1955 struct cmd_list_element *c, const char *value)
1956 {
1957 fprintf_filtered (file,
1958 _("Mode for reading from readonly sections is %s.\n"),
1959 value);
1960 }
1961
1962 /* More generic transfers. */
1963
1964 static LONGEST
1965 default_xfer_partial (struct target_ops *ops, enum target_object object,
1966 const char *annex, gdb_byte *readbuf,
1967 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
1968 {
1969 if (object == TARGET_OBJECT_MEMORY
1970 && ops->deprecated_xfer_memory != NULL)
1971 /* If available, fall back to the target's
1972 "deprecated_xfer_memory" method. */
1973 {
1974 int xfered = -1;
1975
1976 errno = 0;
1977 if (writebuf != NULL)
1978 {
1979 void *buffer = xmalloc (len);
1980 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1981
1982 memcpy (buffer, writebuf, len);
1983 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1984 1/*write*/, NULL, ops);
1985 do_cleanups (cleanup);
1986 }
1987 if (readbuf != NULL)
1988 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1989 0/*read*/, NULL, ops);
1990 if (xfered > 0)
1991 return xfered;
1992 else if (xfered == 0 && errno == 0)
1993 /* "deprecated_xfer_memory" uses 0, cross checked against
1994 ERRNO as one indication of an error. */
1995 return 0;
1996 else
1997 return -1;
1998 }
1999 else if (ops->beneath != NULL)
2000 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2001 readbuf, writebuf, offset, len);
2002 else
2003 return -1;
2004 }
2005
2006 /* The xfer_partial handler for the topmost target. Unlike the default,
2007 it does not need to handle memory specially; it just passes all
2008 requests down the stack. */
2009
2010 static LONGEST
2011 current_xfer_partial (struct target_ops *ops, enum target_object object,
2012 const char *annex, gdb_byte *readbuf,
2013 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
2014 {
2015 if (ops->beneath != NULL)
2016 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2017 readbuf, writebuf, offset, len);
2018 else
2019 return -1;
2020 }
2021
2022 /* Target vector read/write partial wrapper functions. */
2023
2024 static LONGEST
2025 target_read_partial (struct target_ops *ops,
2026 enum target_object object,
2027 const char *annex, gdb_byte *buf,
2028 ULONGEST offset, LONGEST len)
2029 {
2030 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2031 }
2032
2033 static LONGEST
2034 target_write_partial (struct target_ops *ops,
2035 enum target_object object,
2036 const char *annex, const gdb_byte *buf,
2037 ULONGEST offset, LONGEST len)
2038 {
2039 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2040 }
2041
2042 /* Wrappers to perform the full transfer. */
2043
2044 /* For docs on target_read see target.h. */
2045
2046 LONGEST
2047 target_read (struct target_ops *ops,
2048 enum target_object object,
2049 const char *annex, gdb_byte *buf,
2050 ULONGEST offset, LONGEST len)
2051 {
2052 LONGEST xfered = 0;
2053
2054 while (xfered < len)
2055 {
2056 LONGEST xfer = target_read_partial (ops, object, annex,
2057 (gdb_byte *) buf + xfered,
2058 offset + xfered, len - xfered);
2059
2060 /* Call an observer, notifying them of the xfer progress? */
2061 if (xfer == 0)
2062 return xfered;
2063 if (xfer < 0)
2064 return -1;
2065 xfered += xfer;
2066 QUIT;
2067 }
2068 return len;
2069 }
2070
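/* target_read above loops over partial transfers on behalf of the
   caller: it returns LEN when everything was read, the number of bytes
   obtained if the object ends early, or -1 on error.  A minimal sketch
   (the object and buffer size are illustrative assumptions):

     gdb_byte desc[512];
     LONGEST got = target_read (current_target.beneath, TARGET_OBJECT_AUXV,
                                NULL, desc, 0, sizeof desc);

     if (got < 0)
       error (_("could not read the auxiliary vector"));
     ... otherwise the first GOT bytes of DESC are valid ...;

   For objects whose size is not known in advance, target_read_alloc
   further below is usually the more convenient interface.  */
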
2071 /* Assuming that the entire [begin, end) range of memory cannot be
2072 read, try to read whatever subrange is possible to read.
2073
2074 The function returns, in RESULT, either zero or one memory block.
2075 If there's a readable subrange at the beginning, it is completely
2076 read and returned. Any further readable subrange will not be read.
2077 Otherwise, if there's a readable subrange at the end, it will be
2078 completely read and returned. Any readable subranges before it
2079 (obviously, not starting at the beginning) will be ignored. In
2080 other cases -- either no readable subrange, or readable subrange(s)
2081 that are neither at the beginning nor at the end -- nothing is returned.
2082
2083 The purpose of this function is to handle a read across a boundary
2084 of accessible memory in a case when memory map is not available.
2085 The above restrictions are fine for this case, but will give
2086 incorrect results if the memory is 'patchy'. However, supporting
2087 'patchy' memory would require trying to read every single byte,
2088 and that seems an unacceptable solution. An explicit memory map is
2089 recommended for this case -- and read_memory_robust will
2090 take care of reading multiple ranges then. */
2091
2092 static void
2093 read_whatever_is_readable (struct target_ops *ops,
2094 ULONGEST begin, ULONGEST end,
2095 VEC(memory_read_result_s) **result)
2096 {
2097 gdb_byte *buf = xmalloc (end - begin);
2098 ULONGEST current_begin = begin;
2099 ULONGEST current_end = end;
2100 int forward;
2101 memory_read_result_s r;
2102
2103 /* If we previously failed to read 1 byte, nothing can be done here. */
2104 if (end - begin <= 1)
2105 {
2106 xfree (buf);
2107 return;
2108 }
2109
2110 /* Check that either the first or the last byte is readable, and give up
2111 if not. This heuristic is meant to permit reading accessible memory
2112 at the boundary of an accessible region. */
2113 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2114 buf, begin, 1) == 1)
2115 {
2116 forward = 1;
2117 ++current_begin;
2118 }
2119 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2120 buf + (end-begin) - 1, end - 1, 1) == 1)
2121 {
2122 forward = 0;
2123 --current_end;
2124 }
2125 else
2126 {
2127 xfree (buf);
2128 return;
2129 }
2130
2131 /* The loop invariant is that the range [current_begin, current_end) was
2132 previously found to be unreadable as a whole.
2133
2134 Note the loop condition -- if the range is down to 1 byte, it cannot be
2135 divided any further, so there is no point in trying. */
2136 while (current_end - current_begin > 1)
2137 {
2138 ULONGEST first_half_begin, first_half_end;
2139 ULONGEST second_half_begin, second_half_end;
2140 LONGEST xfer;
2141 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2142
2143 if (forward)
2144 {
2145 first_half_begin = current_begin;
2146 first_half_end = middle;
2147 second_half_begin = middle;
2148 second_half_end = current_end;
2149 }
2150 else
2151 {
2152 first_half_begin = middle;
2153 first_half_end = current_end;
2154 second_half_begin = current_begin;
2155 second_half_end = middle;
2156 }
2157
2158 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2159 buf + (first_half_begin - begin),
2160 first_half_begin,
2161 first_half_end - first_half_begin);
2162
2163 if (xfer == first_half_end - first_half_begin)
2164 {
2165 /* This half reads fine, so the error must be in the
2166 other half. */
2167 current_begin = second_half_begin;
2168 current_end = second_half_end;
2169 }
2170 else
2171 {
2172 /* This half is not readable. Because we've tried one byte, we
2173 know some part of this half is actually readable. Go to the next
2174 iteration to divide again and try to read.
2175
2176 We don't handle the other half, because this function only tries
2177 to read a single readable subrange. */
2178 current_begin = first_half_begin;
2179 current_end = first_half_end;
2180 }
2181 }
2182
2183 if (forward)
2184 {
2185 /* The [begin, current_begin) range has been read. */
2186 r.begin = begin;
2187 r.end = current_begin;
2188 r.data = buf;
2189 }
2190 else
2191 {
2192 /* The [current_end, end) range has been read. */
2193 LONGEST rlen = end - current_end;
2194
2195 r.data = xmalloc (rlen);
2196 memcpy (r.data, buf + current_end - begin, rlen);
2197 r.begin = current_end;
2198 r.end = end;
2199 xfree (buf);
2200 }
2201 VEC_safe_push(memory_read_result_s, (*result), &r);
2202 }
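
/* A concrete illustration of the bisection above (assumed numbers, for
   exposition only): suppose [0x1000, 0x2000) failed to read as a whole
   but the byte at 0x1000 is readable.  The loop then tries
   [0x1000, 0x1800); if that succeeds, the boundary lies in
   [0x1800, 0x2000), otherwise in [0x1000, 0x1800).  After roughly
   log2 (0x1000) iterations current_begin converges on the first
   unreadable address, and [0x1000, current_begin) is pushed as the
   single result block.  */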
2203
2204 void
2205 free_memory_read_result_vector (void *x)
2206 {
2207 VEC(memory_read_result_s) *v = x;
2208 memory_read_result_s *current;
2209 int ix;
2210
2211 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2212 {
2213 xfree (current->data);
2214 }
2215 VEC_free (memory_read_result_s, v);
2216 }
2217
2218 VEC(memory_read_result_s) *
2219 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2220 {
2221 VEC(memory_read_result_s) *result = 0;
2222
2223 LONGEST xfered = 0;
2224 while (xfered < len)
2225 {
2226 struct mem_region *region = lookup_mem_region (offset + xfered);
2227 LONGEST rlen;
2228
2229 /* If there is no explicit region, a fake one should be created. */
2230 gdb_assert (region);
2231
2232 if (region->hi == 0)
2233 rlen = len - xfered;
2234 else
2235 rlen = region->hi - offset;
2236
2237 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2238 {
2239 /* Cannot read this region. Note that we can end up here only
2240 if the region is explicitly marked inaccessible, or
2241 'inaccessible-by-default' is in effect. */
2242 xfered += rlen;
2243 }
2244 else
2245 {
2246 LONGEST to_read = min (len - xfered, rlen);
2247 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2248
2249 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2250 (gdb_byte *) buffer,
2251 offset + xfered, to_read);
2252 /* Call an observer, notifying them of the xfer progress? */
2253 if (xfer <= 0)
2254 {
2255 /* Got an error reading full chunk. See if maybe we can read
2256 some subrange. */
2257 xfree (buffer);
2258 read_whatever_is_readable (ops, offset + xfered,
2259 offset + xfered + to_read, &result);
2260 xfered += to_read;
2261 }
2262 else
2263 {
2264 struct memory_read_result r;
2265 r.data = buffer;
2266 r.begin = offset + xfered;
2267 r.end = r.begin + xfer;
2268 VEC_safe_push (memory_read_result_s, result, &r);
2269 xfered += xfer;
2270 }
2271 QUIT;
2272 }
2273 }
2274 return result;
2275 }
2276
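/* A sketch of a typical read_memory_robust caller (illustrative; the
   MI "-data-read-memory-bytes" implementation is the real user):

     VEC(memory_read_result_s) *res
       = read_memory_robust (current_target.beneath, addr, len);
     struct cleanup *old
       = make_cleanup (free_memory_read_result_vector, res);
     memory_read_result_s *block;
     int ix;

     for (ix = 0; VEC_iterate (memory_read_result_s, res, ix, block); ix++)
       ... consume [block->begin, block->end) from block->data ...;

     do_cleanups (old);

   Each element owns its data buffer, which is why the whole vector is
   released through free_memory_read_result_vector.  */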
2277
2278 /* An alternative to target_write with progress callbacks. */
2279
2280 LONGEST
2281 target_write_with_progress (struct target_ops *ops,
2282 enum target_object object,
2283 const char *annex, const gdb_byte *buf,
2284 ULONGEST offset, LONGEST len,
2285 void (*progress) (ULONGEST, void *), void *baton)
2286 {
2287 LONGEST xfered = 0;
2288
2289 /* Give the progress callback a chance to set up. */
2290 if (progress)
2291 (*progress) (0, baton);
2292
2293 while (xfered < len)
2294 {
2295 LONGEST xfer = target_write_partial (ops, object, annex,
2296 (gdb_byte *) buf + xfered,
2297 offset + xfered, len - xfered);
2298
2299 if (xfer == 0)
2300 return xfered;
2301 if (xfer < 0)
2302 return -1;
2303
2304 if (progress)
2305 (*progress) (xfer, baton);
2306
2307 xfered += xfer;
2308 QUIT;
2309 }
2310 return len;
2311 }
2312
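/* A sketch of a progress consumer for the function above (illustrative;
   one real client of this interface is GDB's "load" command).  The
   callback receives 0 once before the transfer starts and then the size
   of each completed chunk:

     static void
     note_progress (ULONGEST chunk, void *baton)
     {
       ULONGEST *total = baton;

       *total += chunk;
     }

     ...
     ULONGEST done = 0;
     target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL, buf,
                                 offset, len, note_progress, &done);
*/
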
2313 /* For docs on target_write see target.h. */
2314
2315 LONGEST
2316 target_write (struct target_ops *ops,
2317 enum target_object object,
2318 const char *annex, const gdb_byte *buf,
2319 ULONGEST offset, LONGEST len)
2320 {
2321 return target_write_with_progress (ops, object, annex, buf, offset, len,
2322 NULL, NULL);
2323 }
2324
2325 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2326 the size of the transferred data. PADDING additional bytes are
2327 available in *BUF_P. This is a helper function for
2328 target_read_alloc; see the declaration of that function for more
2329 information. */
2330
2331 static LONGEST
2332 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2333 const char *annex, gdb_byte **buf_p, int padding)
2334 {
2335 size_t buf_alloc, buf_pos;
2336 gdb_byte *buf;
2337 LONGEST n;
2338
2339 /* This function does not have a length parameter; it reads the
2340 entire OBJECT. Also, it doesn't support objects fetched partly
2341 from one target and partly from another (in a different stratum,
2342 e.g. a core file and an executable). Both reasons make it
2343 unsuitable for reading memory. */
2344 gdb_assert (object != TARGET_OBJECT_MEMORY);
2345
2346 /* Start by reading up to 4K at a time. The target will throttle
2347 this number down if necessary. */
2348 buf_alloc = 4096;
2349 buf = xmalloc (buf_alloc);
2350 buf_pos = 0;
2351 while (1)
2352 {
2353 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2354 buf_pos, buf_alloc - buf_pos - padding);
2355 if (n < 0)
2356 {
2357 /* An error occurred. */
2358 xfree (buf);
2359 return -1;
2360 }
2361 else if (n == 0)
2362 {
2363 /* Read all there was. */
2364 if (buf_pos == 0)
2365 xfree (buf);
2366 else
2367 *buf_p = buf;
2368 return buf_pos;
2369 }
2370
2371 buf_pos += n;
2372
2373 /* If the buffer is filling up, expand it. */
2374 if (buf_alloc < buf_pos * 2)
2375 {
2376 buf_alloc *= 2;
2377 buf = xrealloc (buf, buf_alloc);
2378 }
2379
2380 QUIT;
2381 }
2382 }
2383
2384 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2385 the size of the transferred data. See the declaration in "target.h"
2386 for more information about the return value. */
2387
2388 LONGEST
2389 target_read_alloc (struct target_ops *ops, enum target_object object,
2390 const char *annex, gdb_byte **buf_p)
2391 {
2392 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2393 }
2394
2395 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2396 returned as a string, allocated using xmalloc. If an error occurs
2397 or the transfer is unsupported, NULL is returned. Empty objects
2398 are returned as allocated but empty strings. A warning is issued
2399 if the result contains any embedded NUL bytes. */
2400
2401 char *
2402 target_read_stralloc (struct target_ops *ops, enum target_object object,
2403 const char *annex)
2404 {
2405 gdb_byte *buffer;
2406 char *bufstr;
2407 LONGEST i, transferred;
2408
2409 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2410 bufstr = (char *) buffer;
2411
2412 if (transferred < 0)
2413 return NULL;
2414
2415 if (transferred == 0)
2416 return xstrdup ("");
2417
2418 bufstr[transferred] = 0;
2419
2420 /* Check for embedded NUL bytes; but allow trailing NULs. */
2421 for (i = strlen (bufstr); i < transferred; i++)
2422 if (bufstr[i] != 0)
2423 {
2424 warning (_("target object %d, annex %s, "
2425 "contained unexpected null characters"),
2426 (int) object, annex ? annex : "(none)");
2427 break;
2428 }
2429
2430 return bufstr;
2431 }
2432
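/* A sketch of how target_read_stralloc is typically consumed
   (illustrative; TARGET_OBJECT_OSDATA and the "processes" annex are just
   one example of a string-shaped object):

     char *text = target_read_stralloc (&current_target,
                                        TARGET_OBJECT_OSDATA, "processes");

     if (text != NULL)
       {
         struct cleanup *back_to = make_cleanup (xfree, text);

         ... parse TEXT ...;
         do_cleanups (back_to);
       }

   NULL means the read failed or is unsupported; an empty object comes
   back as an allocated empty string, so the cleanup is still needed.  */
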
2433 /* Memory transfer methods. */
2434
2435 void
2436 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2437 LONGEST len)
2438 {
2439 /* This method is used to read from an alternate, non-current
2440 target. This read must bypass the overlay support (as symbols
2441 don't match this target), and GDB's internal cache (wrong cache
2442 for this target). */
2443 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2444 != len)
2445 memory_error (TARGET_XFER_E_IO, addr);
2446 }
2447
2448 ULONGEST
2449 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2450 int len, enum bfd_endian byte_order)
2451 {
2452 gdb_byte buf[sizeof (ULONGEST)];
2453
2454 gdb_assert (len <= sizeof (buf));
2455 get_target_memory (ops, addr, buf, len);
2456 return extract_unsigned_integer (buf, len, byte_order);
2457 }
2458
2459 int
2460 target_insert_breakpoint (struct gdbarch *gdbarch,
2461 struct bp_target_info *bp_tgt)
2462 {
2463 if (!may_insert_breakpoints)
2464 {
2465 warning (_("May not insert breakpoints"));
2466 return 1;
2467 }
2468
2469 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2470 }
2471
2472 int
2473 target_remove_breakpoint (struct gdbarch *gdbarch,
2474 struct bp_target_info *bp_tgt)
2475 {
2476 /* This is kind of a weird case to handle, but the permission might
2477 have been changed after breakpoints were inserted - in which case
2478 we should just take the user literally and assume that any
2479 breakpoints should be left in place. */
2480 if (!may_insert_breakpoints)
2481 {
2482 warning (_("May not remove breakpoints"));
2483 return 1;
2484 }
2485
2486 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2487 }
2488
2489 static void
2490 target_info (char *args, int from_tty)
2491 {
2492 struct target_ops *t;
2493 int has_all_mem = 0;
2494
2495 if (symfile_objfile != NULL)
2496 printf_unfiltered (_("Symbols from \"%s\".\n"),
2497 objfile_name (symfile_objfile));
2498
2499 for (t = target_stack; t != NULL; t = t->beneath)
2500 {
2501 if (!(*t->to_has_memory) (t))
2502 continue;
2503
2504 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2505 continue;
2506 if (has_all_mem)
2507 printf_unfiltered (_("\tWhile running this, "
2508 "GDB does not access memory from...\n"));
2509 printf_unfiltered ("%s:\n", t->to_longname);
2510 (t->to_files_info) (t);
2511 has_all_mem = (*t->to_has_all_memory) (t);
2512 }
2513 }
2514
2515 /* This function is called before any new inferior is created, e.g.
2516 by running a program, attaching, or connecting to a target.
2517 It cleans up any state from previous invocations which might
2518 change between runs. This is a subset of what target_preopen
2519 resets (things which might change between targets). */
2520
2521 void
2522 target_pre_inferior (int from_tty)
2523 {
2524 /* Clear out solib state. Otherwise the solib state of the previous
2525 inferior might have survived and is entirely wrong for the new
2526 target. This has been observed on GNU/Linux using glibc 2.3. How
2527 to reproduce:
2528
2529 bash$ ./foo&
2530 [1] 4711
2531 bash$ ./foo&
2532 [1] 4712
2533 bash$ gdb ./foo
2534 [...]
2535 (gdb) attach 4711
2536 (gdb) detach
2537 (gdb) attach 4712
2538 Cannot access memory at address 0xdeadbeef
2539 */
2540
2541 /* In some OSs, the shared library list is the same/global/shared
2542 across inferiors. If code is shared between processes, so are
2543 memory regions and features. */
2544 if (!gdbarch_has_global_solist (target_gdbarch ()))
2545 {
2546 no_shared_libraries (NULL, from_tty);
2547
2548 invalidate_target_mem_regions ();
2549
2550 target_clear_description ();
2551 }
2552
2553 agent_capability_invalidate ();
2554 }
2555
2556 /* Callback for iterate_over_inferiors. Gets rid of the given
2557 inferior. */
2558
2559 static int
2560 dispose_inferior (struct inferior *inf, void *args)
2561 {
2562 struct thread_info *thread;
2563
2564 thread = any_thread_of_process (inf->pid);
2565 if (thread)
2566 {
2567 switch_to_thread (thread->ptid);
2568
2569 /* Core inferiors actually should be detached, not killed. */
2570 if (target_has_execution)
2571 target_kill ();
2572 else
2573 target_detach (NULL, 0);
2574 }
2575
2576 return 0;
2577 }
2578
2579 /* This is to be called by the open routine before it does
2580 anything. */
2581
2582 void
2583 target_preopen (int from_tty)
2584 {
2585 dont_repeat ();
2586
2587 if (have_inferiors ())
2588 {
2589 if (!from_tty
2590 || !have_live_inferiors ()
2591 || query (_("A program is being debugged already. Kill it? ")))
2592 iterate_over_inferiors (dispose_inferior, NULL);
2593 else
2594 error (_("Program not killed."));
2595 }
2596
2597 /* Calling target_kill may remove the target from the stack. But if
2598 it doesn't (which seems like a win for UDI), remove it now. */
2599 /* Leave the exec target, though. The user may be switching from a
2600 live process to a core of the same program. */
2601 pop_all_targets_above (file_stratum);
2602
2603 target_pre_inferior (from_tty);
2604 }
2605
2606 /* Detach a target after doing deferred register stores. */
2607
2608 void
2609 target_detach (const char *args, int from_tty)
2610 {
2611 struct target_ops* t;
2612
2613 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2614 /* Don't remove global breakpoints here. They're removed on
2615 disconnection from the target. */
2616 ;
2617 else
2618 /* If we're in breakpoints-always-inserted mode, have to remove
2619 them before detaching. */
2620 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2621
2622 prepare_for_detach ();
2623
2624 for (t = current_target.beneath; t != NULL; t = t->beneath)
2625 {
2626 if (t->to_detach != NULL)
2627 {
2628 t->to_detach (t, args, from_tty);
2629 if (targetdebug)
2630 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2631 args, from_tty);
2632 return;
2633 }
2634 }
2635
2636 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2637 }
2638
2639 void
2640 target_disconnect (char *args, int from_tty)
2641 {
2642 struct target_ops *t;
2643
2644 /* If we're in breakpoints-always-inserted mode or if breakpoints
2645 are global across processes, we have to remove them before
2646 disconnecting. */
2647 remove_breakpoints ();
2648
2649 for (t = current_target.beneath; t != NULL; t = t->beneath)
2650 if (t->to_disconnect != NULL)
2651 {
2652 if (targetdebug)
2653 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2654 args, from_tty);
2655 t->to_disconnect (t, args, from_tty);
2656 return;
2657 }
2658
2659 tcomplain ();
2660 }
2661
2662 ptid_t
2663 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2664 {
2665 struct target_ops *t;
2666
2667 for (t = current_target.beneath; t != NULL; t = t->beneath)
2668 {
2669 if (t->to_wait != NULL)
2670 {
2671 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2672
2673 if (targetdebug)
2674 {
2675 char *status_string;
2676 char *options_string;
2677
2678 status_string = target_waitstatus_to_string (status);
2679 options_string = target_options_to_string (options);
2680 fprintf_unfiltered (gdb_stdlog,
2681 "target_wait (%d, status, options={%s})"
2682 " = %d, %s\n",
2683 ptid_get_pid (ptid), options_string,
2684 ptid_get_pid (retval), status_string);
2685 xfree (status_string);
2686 xfree (options_string);
2687 }
2688
2689 return retval;
2690 }
2691 }
2692
2693 noprocess ();
2694 }
2695
2696 char *
2697 target_pid_to_str (ptid_t ptid)
2698 {
2699 struct target_ops *t;
2700
2701 for (t = current_target.beneath; t != NULL; t = t->beneath)
2702 {
2703 if (t->to_pid_to_str != NULL)
2704 return (*t->to_pid_to_str) (t, ptid);
2705 }
2706
2707 return normal_pid_to_str (ptid);
2708 }
2709
2710 char *
2711 target_thread_name (struct thread_info *info)
2712 {
2713 struct target_ops *t;
2714
2715 for (t = current_target.beneath; t != NULL; t = t->beneath)
2716 {
2717 if (t->to_thread_name != NULL)
2718 return (*t->to_thread_name) (info);
2719 }
2720
2721 return NULL;
2722 }
2723
2724 void
2725 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2726 {
2727 struct target_ops *t;
2728
2729 target_dcache_invalidate ();
2730
2731 for (t = current_target.beneath; t != NULL; t = t->beneath)
2732 {
2733 if (t->to_resume != NULL)
2734 {
2735 t->to_resume (t, ptid, step, signal);
2736 if (targetdebug)
2737 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2738 ptid_get_pid (ptid),
2739 step ? "step" : "continue",
2740 gdb_signal_to_name (signal));
2741
2742 registers_changed_ptid (ptid);
2743 set_executing (ptid, 1);
2744 set_running (ptid, 1);
2745 clear_inline_frame_state (ptid);
2746 return;
2747 }
2748 }
2749
2750 noprocess ();
2751 }
2752
2753 void
2754 target_pass_signals (int numsigs, unsigned char *pass_signals)
2755 {
2756 struct target_ops *t;
2757
2758 for (t = current_target.beneath; t != NULL; t = t->beneath)
2759 {
2760 if (t->to_pass_signals != NULL)
2761 {
2762 if (targetdebug)
2763 {
2764 int i;
2765
2766 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2767 numsigs);
2768
2769 for (i = 0; i < numsigs; i++)
2770 if (pass_signals[i])
2771 fprintf_unfiltered (gdb_stdlog, " %s",
2772 gdb_signal_to_name (i));
2773
2774 fprintf_unfiltered (gdb_stdlog, " })\n");
2775 }
2776
2777 (*t->to_pass_signals) (numsigs, pass_signals);
2778 return;
2779 }
2780 }
2781 }
2782
2783 void
2784 target_program_signals (int numsigs, unsigned char *program_signals)
2785 {
2786 struct target_ops *t;
2787
2788 for (t = current_target.beneath; t != NULL; t = t->beneath)
2789 {
2790 if (t->to_program_signals != NULL)
2791 {
2792 if (targetdebug)
2793 {
2794 int i;
2795
2796 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2797 numsigs);
2798
2799 for (i = 0; i < numsigs; i++)
2800 if (program_signals[i])
2801 fprintf_unfiltered (gdb_stdlog, " %s",
2802 gdb_signal_to_name (i));
2803
2804 fprintf_unfiltered (gdb_stdlog, " })\n");
2805 }
2806
2807 (*t->to_program_signals) (numsigs, program_signals);
2808 return;
2809 }
2810 }
2811 }
2812
2813 /* Look through the list of possible targets for a target that can
2814 follow forks. */
2815
2816 int
2817 target_follow_fork (int follow_child, int detach_fork)
2818 {
2819 struct target_ops *t;
2820
2821 for (t = current_target.beneath; t != NULL; t = t->beneath)
2822 {
2823 if (t->to_follow_fork != NULL)
2824 {
2825 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2826
2827 if (targetdebug)
2828 fprintf_unfiltered (gdb_stdlog,
2829 "target_follow_fork (%d, %d) = %d\n",
2830 follow_child, detach_fork, retval);
2831 return retval;
2832 }
2833 }
2834
2835 /* Some target returned a fork event, but did not know how to follow it. */
2836 internal_error (__FILE__, __LINE__,
2837 _("could not find a target to follow fork"));
2838 }
2839
2840 void
2841 target_mourn_inferior (void)
2842 {
2843 struct target_ops *t;
2844
2845 for (t = current_target.beneath; t != NULL; t = t->beneath)
2846 {
2847 if (t->to_mourn_inferior != NULL)
2848 {
2849 t->to_mourn_inferior (t);
2850 if (targetdebug)
2851 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2852
2853 /* We no longer need to keep handles on any of the object files.
2854 Make sure to release them to avoid unnecessarily locking any
2855 of them while we're not actually debugging. */
2856 bfd_cache_close_all ();
2857
2858 return;
2859 }
2860 }
2861
2862 internal_error (__FILE__, __LINE__,
2863 _("could not find a target to follow mourn inferior"));
2864 }
2865
2866 /* Look for a target which can describe architectural features, starting
2867 from TARGET. If we find one, return its description. */
2868
2869 const struct target_desc *
2870 target_read_description (struct target_ops *target)
2871 {
2872 struct target_ops *t;
2873
2874 for (t = target; t != NULL; t = t->beneath)
2875 if (t->to_read_description != NULL)
2876 {
2877 const struct target_desc *tdesc;
2878
2879 tdesc = t->to_read_description (t);
2880 if (tdesc)
2881 return tdesc;
2882 }
2883
2884 return NULL;
2885 }
2886
2887 /* The default implementation of to_search_memory.
2888 This implements a basic search of memory, reading target memory and
2889 performing the search here (as opposed to performing the search on the
2890 target side with, for example, gdbserver). */
2891
2892 int
2893 simple_search_memory (struct target_ops *ops,
2894 CORE_ADDR start_addr, ULONGEST search_space_len,
2895 const gdb_byte *pattern, ULONGEST pattern_len,
2896 CORE_ADDR *found_addrp)
2897 {
2898 /* NOTE: also defined in find.c testcase. */
2899 #define SEARCH_CHUNK_SIZE 16000
2900 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2901 /* Buffer to hold memory contents for searching. */
2902 gdb_byte *search_buf;
2903 unsigned search_buf_size;
2904 struct cleanup *old_cleanups;
2905
2906 search_buf_size = chunk_size + pattern_len - 1;
2907
2908 /* No point in trying to allocate a buffer larger than the search space. */
2909 if (search_space_len < search_buf_size)
2910 search_buf_size = search_space_len;
2911
2912 search_buf = malloc (search_buf_size);
2913 if (search_buf == NULL)
2914 error (_("Unable to allocate memory to perform the search."));
2915 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2916
2917 /* Prime the search buffer. */
2918
2919 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2920 search_buf, start_addr, search_buf_size) != search_buf_size)
2921 {
2922 warning (_("Unable to access %s bytes of target "
2923 "memory at %s, halting search."),
2924 pulongest (search_buf_size), hex_string (start_addr));
2925 do_cleanups (old_cleanups);
2926 return -1;
2927 }
2928
2929 /* Perform the search.
2930
2931 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2932 When we've scanned N bytes we copy the trailing bytes to the start and
2933 read in another N bytes. */
2934
2935 while (search_space_len >= pattern_len)
2936 {
2937 gdb_byte *found_ptr;
2938 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2939
2940 found_ptr = memmem (search_buf, nr_search_bytes,
2941 pattern, pattern_len);
2942
2943 if (found_ptr != NULL)
2944 {
2945 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2946
2947 *found_addrp = found_addr;
2948 do_cleanups (old_cleanups);
2949 return 1;
2950 }
2951
2952 /* Not found in this chunk, skip to next chunk. */
2953
2954 /* Don't let search_space_len wrap here, it's unsigned. */
2955 if (search_space_len >= chunk_size)
2956 search_space_len -= chunk_size;
2957 else
2958 search_space_len = 0;
2959
2960 if (search_space_len >= pattern_len)
2961 {
2962 unsigned keep_len = search_buf_size - chunk_size;
2963 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2964 int nr_to_read;
2965
2966 /* Copy the trailing part of the previous iteration to the front
2967 of the buffer for the next iteration. */
2968 gdb_assert (keep_len == pattern_len - 1);
2969 memcpy (search_buf, search_buf + chunk_size, keep_len);
2970
2971 nr_to_read = min (search_space_len - keep_len, chunk_size);
2972
2973 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2974 search_buf + keep_len, read_addr,
2975 nr_to_read) != nr_to_read)
2976 {
2977 warning (_("Unable to access %s bytes of target "
2978 "memory at %s, halting search."),
2979 plongest (nr_to_read),
2980 hex_string (read_addr));
2981 do_cleanups (old_cleanups);
2982 return -1;
2983 }
2984
2985 start_addr += chunk_size;
2986 }
2987 }
2988
2989 /* Not found. */
2990
2991 do_cleanups (old_cleanups);
2992 return 0;
2993 }
2994
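/* To make the chunk overlap above concrete (numbers are illustrative):
   with SEARCH_CHUNK_SIZE of 16000 and a 4-byte pattern, search_buf holds
   16003 bytes.  Each iteration scans all 16003, then copies the final 3
   bytes (pattern_len - 1) to the front so that a match straddling the
   chunk boundary is still found, and reads the next 16000 bytes in after
   them.  */
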
2995 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2996 sequence of bytes in PATTERN with length PATTERN_LEN.
2997
2998 The result is 1 if found, 0 if not found, and -1 if there was an error
2999 requiring halting of the search (e.g. memory read error).
3000 If the pattern is found the address is recorded in FOUND_ADDRP. */
3001
3002 int
3003 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3004 const gdb_byte *pattern, ULONGEST pattern_len,
3005 CORE_ADDR *found_addrp)
3006 {
3007 struct target_ops *t;
3008 int found;
3009
3010 /* We don't use INHERIT to set current_target.to_search_memory,
3011 so we have to scan the target stack and handle targetdebug
3012 ourselves. */
3013
3014 if (targetdebug)
3015 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3016 hex_string (start_addr));
3017
3018 for (t = current_target.beneath; t != NULL; t = t->beneath)
3019 if (t->to_search_memory != NULL)
3020 break;
3021
3022 if (t != NULL)
3023 {
3024 found = t->to_search_memory (t, start_addr, search_space_len,
3025 pattern, pattern_len, found_addrp);
3026 }
3027 else
3028 {
3029 /* If a special version of to_search_memory isn't available, use the
3030 simple version. */
3031 found = simple_search_memory (current_target.beneath,
3032 start_addr, search_space_len,
3033 pattern, pattern_len, found_addrp);
3034 }
3035
3036 if (targetdebug)
3037 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3038
3039 return found;
3040 }
3041
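/* A sketch of a typical target_search_memory caller (illustrative; the
   "find" command in findcmd.c is the real one):

     CORE_ADDR found;
     int rc = target_search_memory (start, search_len,
                                    pattern, pattern_len, &found);

     if (rc < 0)
       error (_("search halted by a memory error"));
     else if (rc == 1)
       printf_filtered ("%s\n", paddress (target_gdbarch (), found));

   FOUND is only meaningful when the return value is 1.  */
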
3042 /* Look through the currently pushed targets. If none of them will
3043 be able to restart the currently running process, issue an error
3044 message. */
3045
3046 void
3047 target_require_runnable (void)
3048 {
3049 struct target_ops *t;
3050
3051 for (t = target_stack; t != NULL; t = t->beneath)
3052 {
3053 /* If this target knows how to create a new program, then
3054 assume we will still be able to after killing the current
3055 one. Either killing and mourning will not pop T, or else
3056 find_default_run_target will find it again. */
3057 if (t->to_create_inferior != NULL)
3058 return;
3059
3060 /* Do not worry about thread_stratum targets that can not
3061 create inferiors. Assume they will be pushed again if
3062 necessary, and continue to the process_stratum. */
3063 if (t->to_stratum == thread_stratum
3064 || t->to_stratum == arch_stratum)
3065 continue;
3066
3067 error (_("The \"%s\" target does not support \"run\". "
3068 "Try \"help target\" or \"continue\"."),
3069 t->to_shortname);
3070 }
3071
3072 /* This function is only called if the target is running. In that
3073 case there should have been a process_stratum target and it
3074 should either know how to create inferiors, or not... */
3075 internal_error (__FILE__, __LINE__, _("No targets found"));
3076 }
3077
3078 /* Look through the list of possible targets for a target that can
3079 execute a run or attach command without any other data. This is
3080 used to locate the default process stratum.
3081
3082 If DO_MESG is not NULL, the result is always valid (error() is
3083 called for errors); else, return NULL on error. */
3084
3085 static struct target_ops *
3086 find_default_run_target (char *do_mesg)
3087 {
3088 struct target_ops **t;
3089 struct target_ops *runnable = NULL;
3090 int count;
3091
3092 count = 0;
3093
3094 for (t = target_structs; t < target_structs + target_struct_size;
3095 ++t)
3096 {
3097 if ((*t)->to_can_run && target_can_run (*t))
3098 {
3099 runnable = *t;
3100 ++count;
3101 }
3102 }
3103
3104 if (count != 1)
3105 {
3106 if (do_mesg)
3107 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3108 else
3109 return NULL;
3110 }
3111
3112 return runnable;
3113 }
3114
3115 void
3116 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3117 {
3118 struct target_ops *t;
3119
3120 t = find_default_run_target ("attach");
3121 (t->to_attach) (t, args, from_tty);
3122 return;
3123 }
3124
3125 void
3126 find_default_create_inferior (struct target_ops *ops,
3127 char *exec_file, char *allargs, char **env,
3128 int from_tty)
3129 {
3130 struct target_ops *t;
3131
3132 t = find_default_run_target ("run");
3133 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3134 return;
3135 }
3136
3137 static int
3138 find_default_can_async_p (void)
3139 {
3140 struct target_ops *t;
3141
3142 /* This may be called before the target is pushed on the stack;
3143 look for the default process stratum. If there's none, gdb isn't
3144 configured with a native debugger, and target remote isn't
3145 connected yet. */
3146 t = find_default_run_target (NULL);
3147 if (t && t->to_can_async_p)
3148 return (t->to_can_async_p) ();
3149 return 0;
3150 }
3151
3152 static int
3153 find_default_is_async_p (void)
3154 {
3155 struct target_ops *t;
3156
3157 /* This may be called before the target is pushed on the stack;
3158 look for the default process stratum. If there's none, gdb isn't
3159 configured with a native debugger, and target remote isn't
3160 connected yet. */
3161 t = find_default_run_target (NULL);
3162 if (t && t->to_is_async_p)
3163 return (t->to_is_async_p) ();
3164 return 0;
3165 }
3166
3167 static int
3168 find_default_supports_non_stop (void)
3169 {
3170 struct target_ops *t;
3171
3172 t = find_default_run_target (NULL);
3173 if (t && t->to_supports_non_stop)
3174 return (t->to_supports_non_stop) ();
3175 return 0;
3176 }
3177
3178 int
3179 target_supports_non_stop (void)
3180 {
3181 struct target_ops *t;
3182
3183 for (t = &current_target; t != NULL; t = t->beneath)
3184 if (t->to_supports_non_stop)
3185 return t->to_supports_non_stop ();
3186
3187 return 0;
3188 }
3189
3190 /* Implement the "info proc" command. */
3191
3192 int
3193 target_info_proc (char *args, enum info_proc_what what)
3194 {
3195 struct target_ops *t;
3196
3197 /* If we're already connected to something that can get us OS
3198 related data, use it. Otherwise, try using the native
3199 target. */
3200 if (current_target.to_stratum >= process_stratum)
3201 t = current_target.beneath;
3202 else
3203 t = find_default_run_target (NULL);
3204
3205 for (; t != NULL; t = t->beneath)
3206 {
3207 if (t->to_info_proc != NULL)
3208 {
3209 t->to_info_proc (t, args, what);
3210
3211 if (targetdebug)
3212 fprintf_unfiltered (gdb_stdlog,
3213 "target_info_proc (\"%s\", %d)\n", args, what);
3214
3215 return 1;
3216 }
3217 }
3218
3219 return 0;
3220 }
3221
3222 static int
3223 find_default_supports_disable_randomization (void)
3224 {
3225 struct target_ops *t;
3226
3227 t = find_default_run_target (NULL);
3228 if (t && t->to_supports_disable_randomization)
3229 return (t->to_supports_disable_randomization) ();
3230 return 0;
3231 }
3232
3233 int
3234 target_supports_disable_randomization (void)
3235 {
3236 struct target_ops *t;
3237
3238 for (t = &current_target; t != NULL; t = t->beneath)
3239 if (t->to_supports_disable_randomization)
3240 return t->to_supports_disable_randomization ();
3241
3242 return 0;
3243 }
3244
3245 char *
3246 target_get_osdata (const char *type)
3247 {
3248 struct target_ops *t;
3249
3250 /* If we're already connected to something that can get us OS
3251 related data, use it. Otherwise, try using the native
3252 target. */
3253 if (current_target.to_stratum >= process_stratum)
3254 t = current_target.beneath;
3255 else
3256 t = find_default_run_target ("get OS data");
3257
3258 if (!t)
3259 return NULL;
3260
3261 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3262 }
3263
3264 /* Determine the current address space of thread PTID. */
3265
3266 struct address_space *
3267 target_thread_address_space (ptid_t ptid)
3268 {
3269 struct address_space *aspace;
3270 struct inferior *inf;
3271 struct target_ops *t;
3272
3273 for (t = current_target.beneath; t != NULL; t = t->beneath)
3274 {
3275 if (t->to_thread_address_space != NULL)
3276 {
3277 aspace = t->to_thread_address_space (t, ptid);
3278 gdb_assert (aspace);
3279
3280 if (targetdebug)
3281 fprintf_unfiltered (gdb_stdlog,
3282 "target_thread_address_space (%s) = %d\n",
3283 target_pid_to_str (ptid),
3284 address_space_num (aspace));
3285 return aspace;
3286 }
3287 }
3288
3289 /* Fall-back to the "main" address space of the inferior. */
3290 inf = find_inferior_pid (ptid_get_pid (ptid));
3291
3292 if (inf == NULL || inf->aspace == NULL)
3293 internal_error (__FILE__, __LINE__,
3294 _("Can't determine the current "
3295 "address space of thread %s\n"),
3296 target_pid_to_str (ptid));
3297
3298 return inf->aspace;
3299 }
3300
3301
3302 /* Target file operations. */
3303
3304 static struct target_ops *
3305 default_fileio_target (void)
3306 {
3307 /* If we're already connected to something that can perform
3308 file I/O, use it. Otherwise, try using the native target. */
3309 if (current_target.to_stratum >= process_stratum)
3310 return current_target.beneath;
3311 else
3312 return find_default_run_target ("file I/O");
3313 }
3314
3315 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3316 target file descriptor, or -1 if an error occurs (and set
3317 *TARGET_ERRNO). */
3318 int
3319 target_fileio_open (const char *filename, int flags, int mode,
3320 int *target_errno)
3321 {
3322 struct target_ops *t;
3323
3324 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3325 {
3326 if (t->to_fileio_open != NULL)
3327 {
3328 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3329
3330 if (targetdebug)
3331 fprintf_unfiltered (gdb_stdlog,
3332 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3333 filename, flags, mode,
3334 fd, fd != -1 ? 0 : *target_errno);
3335 return fd;
3336 }
3337 }
3338
3339 *target_errno = FILEIO_ENOSYS;
3340 return -1;
3341 }
3342
3343 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3344 Return the number of bytes written, or -1 if an error occurs
3345 (and set *TARGET_ERRNO). */
3346 int
3347 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3348 ULONGEST offset, int *target_errno)
3349 {
3350 struct target_ops *t;
3351
3352 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3353 {
3354 if (t->to_fileio_pwrite != NULL)
3355 {
3356 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3357 target_errno);
3358
3359 if (targetdebug)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "target_fileio_pwrite (%d,...,%d,%s) "
3362 "= %d (%d)\n",
3363 fd, len, pulongest (offset),
3364 ret, ret != -1 ? 0 : *target_errno);
3365 return ret;
3366 }
3367 }
3368
3369 *target_errno = FILEIO_ENOSYS;
3370 return -1;
3371 }
3372
3373 /* Read up to LEN bytes from FD on the target into READ_BUF.
3374 Return the number of bytes read, or -1 if an error occurs
3375 (and set *TARGET_ERRNO). */
3376 int
3377 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3378 ULONGEST offset, int *target_errno)
3379 {
3380 struct target_ops *t;
3381
3382 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3383 {
3384 if (t->to_fileio_pread != NULL)
3385 {
3386 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3387 target_errno);
3388
3389 if (targetdebug)
3390 fprintf_unfiltered (gdb_stdlog,
3391 "target_fileio_pread (%d,...,%d,%s) "
3392 "= %d (%d)\n",
3393 fd, len, pulongest (offset),
3394 ret, ret != -1 ? 0 : *target_errno);
3395 return ret;
3396 }
3397 }
3398
3399 *target_errno = FILEIO_ENOSYS;
3400 return -1;
3401 }
3402
3403 /* Close FD on the target. Return 0, or -1 if an error occurs
3404 (and set *TARGET_ERRNO). */
3405 int
3406 target_fileio_close (int fd, int *target_errno)
3407 {
3408 struct target_ops *t;
3409
3410 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3411 {
3412 if (t->to_fileio_close != NULL)
3413 {
3414 int ret = t->to_fileio_close (fd, target_errno);
3415
3416 if (targetdebug)
3417 fprintf_unfiltered (gdb_stdlog,
3418 "target_fileio_close (%d) = %d (%d)\n",
3419 fd, ret, ret != -1 ? 0 : *target_errno);
3420 return ret;
3421 }
3422 }
3423
3424 *target_errno = FILEIO_ENOSYS;
3425 return -1;
3426 }
3427
3428 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3429 occurs (and set *TARGET_ERRNO). */
3430 int
3431 target_fileio_unlink (const char *filename, int *target_errno)
3432 {
3433 struct target_ops *t;
3434
3435 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3436 {
3437 if (t->to_fileio_unlink != NULL)
3438 {
3439 int ret = t->to_fileio_unlink (filename, target_errno);
3440
3441 if (targetdebug)
3442 fprintf_unfiltered (gdb_stdlog,
3443 "target_fileio_unlink (%s) = %d (%d)\n",
3444 filename, ret, ret != -1 ? 0 : *target_errno);
3445 return ret;
3446 }
3447 }
3448
3449 *target_errno = FILEIO_ENOSYS;
3450 return -1;
3451 }
3452
3453 /* Read the value of the symbolic link FILENAME on the target. Return a
3454 null-terminated string allocated via xmalloc, or NULL if an error
3455 occurs (and set *TARGET_ERRNO). */
3456 char *
3457 target_fileio_readlink (const char *filename, int *target_errno)
3458 {
3459 struct target_ops *t;
3460
3461 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3462 {
3463 if (t->to_fileio_readlink != NULL)
3464 {
3465 char *ret = t->to_fileio_readlink (filename, target_errno);
3466
3467 if (targetdebug)
3468 fprintf_unfiltered (gdb_stdlog,
3469 "target_fileio_readlink (%s) = %s (%d)\n",
3470 filename, ret? ret : "(nil)",
3471 ret? 0 : *target_errno);
3472 return ret;
3473 }
3474 }
3475
3476 *target_errno = FILEIO_ENOSYS;
3477 return NULL;
3478 }
3479
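/* A sketch of how the fileio calls above combine (illustrative only; the
   file name is an assumption, and errors are reported through
   *TARGET_ERRNO rather than errno):

     int target_errno;
     int fd = target_fileio_open ("/proc/version", FILEIO_O_RDONLY, 0,
                                  &target_errno);

     if (fd != -1)
       {
         gdb_byte buf[128];
         int n = target_fileio_pread (fd, buf, sizeof buf, 0,
                                      &target_errno);

         ... use the first N bytes of BUF if n > 0 ...;
         target_fileio_close (fd, &target_errno);
       }
*/
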
3480 static void
3481 target_fileio_close_cleanup (void *opaque)
3482 {
3483 int fd = *(int *) opaque;
3484 int target_errno;
3485
3486 target_fileio_close (fd, &target_errno);
3487 }
3488
3489 /* Read target file FILENAME. Store the result in *BUF_P and
3490 return the size of the transferred data. PADDING additional bytes are
3491 available in *BUF_P. This is a helper function for
3492 target_fileio_read_alloc; see the declaration of that function for more
3493 information. */
3494
3495 static LONGEST
3496 target_fileio_read_alloc_1 (const char *filename,
3497 gdb_byte **buf_p, int padding)
3498 {
3499 struct cleanup *close_cleanup;
3500 size_t buf_alloc, buf_pos;
3501 gdb_byte *buf;
3502 LONGEST n;
3503 int fd;
3504 int target_errno;
3505
3506 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3507 if (fd == -1)
3508 return -1;
3509
3510 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3511
3512 /* Start by reading up to 4K at a time. The target will throttle
3513 this number down if necessary. */
3514 buf_alloc = 4096;
3515 buf = xmalloc (buf_alloc);
3516 buf_pos = 0;
3517 while (1)
3518 {
3519 n = target_fileio_pread (fd, &buf[buf_pos],
3520 buf_alloc - buf_pos - padding, buf_pos,
3521 &target_errno);
3522 if (n < 0)
3523 {
3524 /* An error occurred. */
3525 do_cleanups (close_cleanup);
3526 xfree (buf);
3527 return -1;
3528 }
3529 else if (n == 0)
3530 {
3531 /* Read all there was. */
3532 do_cleanups (close_cleanup);
3533 if (buf_pos == 0)
3534 xfree (buf);
3535 else
3536 *buf_p = buf;
3537 return buf_pos;
3538 }
3539
3540 buf_pos += n;
3541
3542 /* If the buffer is filling up, expand it. */
3543 if (buf_alloc < buf_pos * 2)
3544 {
3545 buf_alloc *= 2;
3546 buf = xrealloc (buf, buf_alloc);
3547 }
3548
3549 QUIT;
3550 }
3551 }
3552
3553 /* Read target file FILENAME. Store the result in *BUF_P and return
3554 the size of the transferred data. See the declaration in "target.h"
3555 for more information about the return value. */
3556
3557 LONGEST
3558 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3559 {
3560 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3561 }
3562
3563 /* Read target file FILENAME. The result is NUL-terminated and
3564 returned as a string, allocated using xmalloc. If an error occurs
3565 or the transfer is unsupported, NULL is returned. Empty objects
3566 are returned as allocated but empty strings. A warning is issued
3567 if the result contains any embedded NUL bytes. */
3568
3569 char *
3570 target_fileio_read_stralloc (const char *filename)
3571 {
3572 gdb_byte *buffer;
3573 char *bufstr;
3574 LONGEST i, transferred;
3575
3576 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3577 bufstr = (char *) buffer;
3578
3579 if (transferred < 0)
3580 return NULL;
3581
3582 if (transferred == 0)
3583 return xstrdup ("");
3584
3585 bufstr[transferred] = 0;
3586
3587 /* Check for embedded NUL bytes; but allow trailing NULs. */
3588 for (i = strlen (bufstr); i < transferred; i++)
3589 if (bufstr[i] != 0)
3590 {
3591 warning (_("target file %s "
3592 "contained unexpected null characters"),
3593 filename);
3594 break;
3595 }
3596
3597 return bufstr;
3598 }
3599
3600
3601 static int
3602 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3603 {
3604 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3605 }
3606
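/* For example (illustrative numbers): on a target whose pointers are 64
   bits wide, gdbarch_ptr_bit / TARGET_CHAR_BIT is 8, so the default
   above accepts hardware watchpoints of up to 8 bytes and rejects
   anything longer.  */
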
3607 static int
3608 default_watchpoint_addr_within_range (struct target_ops *target,
3609 CORE_ADDR addr,
3610 CORE_ADDR start, int length)
3611 {
3612 return addr >= start && addr < start + length;
3613 }
3614
3615 static struct gdbarch *
3616 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3617 {
3618 return target_gdbarch ();
3619 }
3620
3621 static int
3622 return_zero (void)
3623 {
3624 return 0;
3625 }
3626
3627 static int
3628 return_one (void)
3629 {
3630 return 1;
3631 }
3632
3633 static int
3634 return_minus_one (void)
3635 {
3636 return -1;
3637 }
3638
3639 static void *
3640 return_null (void)
3641 {
3642 return 0;
3643 }
3644
3645 /* Find the next target down the stack from the specified target. */
3648
3649 struct target_ops *
3650 find_target_beneath (struct target_ops *t)
3651 {
3652 return t->beneath;
3653 }
3654
3655 \f
3656 /* The inferior process has died. Long live the inferior! */
3657
3658 void
3659 generic_mourn_inferior (void)
3660 {
3661 ptid_t ptid;
3662
3663 ptid = inferior_ptid;
3664 inferior_ptid = null_ptid;
3665
3666 /* Mark breakpoints uninserted in case something tries to delete a
3667 breakpoint while we delete the inferior's threads (which would
3668 fail, since the inferior is long gone). */
3669 mark_breakpoints_out ();
3670
3671 if (!ptid_equal (ptid, null_ptid))
3672 {
3673 int pid = ptid_get_pid (ptid);
3674 exit_inferior (pid);
3675 }
3676
3677 /* Note this wipes step-resume breakpoints, so needs to be done
3678 after exit_inferior, which ends up referencing the step-resume
3679 breakpoints through clear_thread_inferior_resources. */
3680 breakpoint_init_inferior (inf_exited);
3681
3682 registers_changed ();
3683
3684 reopen_exec_file ();
3685 reinit_frame_cache ();
3686
3687 if (deprecated_detach_hook)
3688 deprecated_detach_hook ();
3689 }
3690 \f
3691 /* Convert a normal process ID to a string. Returns the string in a
3692 static buffer. */
3693
3694 char *
3695 normal_pid_to_str (ptid_t ptid)
3696 {
3697 static char buf[32];
3698
3699 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3700 return buf;
3701 }
3702
3703 static char *
3704 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3705 {
3706 return normal_pid_to_str (ptid);
3707 }
3708
3709 /* Error-catcher for target_find_memory_regions. */
3710 static int
3711 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3712 {
3713 error (_("Command not implemented for this target."));
3714 return 0;
3715 }
3716
3717 /* Error-catcher for target_make_corefile_notes. */
3718 static char *
3719 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3720 {
3721 error (_("Command not implemented for this target."));
3722 return NULL;
3723 }
3724
3725 /* Error-catcher for target_get_bookmark. */
3726 static gdb_byte *
3727 dummy_get_bookmark (char *ignore1, int ignore2)
3728 {
3729 tcomplain ();
3730 return NULL;
3731 }
3732
3733 /* Error-catcher for target_goto_bookmark. */
3734 static void
3735 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3736 {
3737 tcomplain ();
3738 }
3739
3740 /* Set up the handful of non-empty slots needed by the dummy target
3741 vector. */
3742
3743 static void
3744 init_dummy_target (void)
3745 {
3746 dummy_target.to_shortname = "None";
3747 dummy_target.to_longname = "None";
3748 dummy_target.to_doc = "";
3749 dummy_target.to_attach = find_default_attach;
3750 dummy_target.to_detach =
3751 (void (*)(struct target_ops *, const char *, int))target_ignore;
3752 dummy_target.to_create_inferior = find_default_create_inferior;
3753 dummy_target.to_can_async_p = find_default_can_async_p;
3754 dummy_target.to_is_async_p = find_default_is_async_p;
3755 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3756 dummy_target.to_supports_disable_randomization
3757 = find_default_supports_disable_randomization;
3758 dummy_target.to_pid_to_str = dummy_pid_to_str;
3759 dummy_target.to_stratum = dummy_stratum;
3760 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3761 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3762 dummy_target.to_get_bookmark = dummy_get_bookmark;
3763 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3764 dummy_target.to_xfer_partial = default_xfer_partial;
3765 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3766 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3767 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3768 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3769 dummy_target.to_has_execution
3770 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3771 dummy_target.to_stopped_by_watchpoint = return_zero;
3772 dummy_target.to_stopped_data_address =
3773 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3774 dummy_target.to_magic = OPS_MAGIC;
3775 }
3776 \f
3777 static void
3778 debug_to_open (char *args, int from_tty)
3779 {
3780 debug_target.to_open (args, from_tty);
3781
3782 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3783 }
3784
3785 void
3786 target_close (struct target_ops *targ)
3787 {
3788 gdb_assert (!target_is_pushed (targ));
3789
3790 if (targ->to_xclose != NULL)
3791 targ->to_xclose (targ);
3792 else if (targ->to_close != NULL)
3793 targ->to_close ();
3794
3795 if (targetdebug)
3796 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3797 }
3798
3799 void
3800 target_attach (char *args, int from_tty)
3801 {
3802 struct target_ops *t;
3803
3804 for (t = current_target.beneath; t != NULL; t = t->beneath)
3805 {
3806 if (t->to_attach != NULL)
3807 {
3808 t->to_attach (t, args, from_tty);
3809 if (targetdebug)
3810 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3811 args, from_tty);
3812 return;
3813 }
3814 }
3815
3816 internal_error (__FILE__, __LINE__,
3817 _("could not find a target to attach"));
3818 }
3819
3820 int
3821 target_thread_alive (ptid_t ptid)
3822 {
3823 struct target_ops *t;
3824
3825 for (t = current_target.beneath; t != NULL; t = t->beneath)
3826 {
3827 if (t->to_thread_alive != NULL)
3828 {
3829 int retval;
3830
3831 retval = t->to_thread_alive (t, ptid);
3832 if (targetdebug)
3833 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3834 ptid_get_pid (ptid), retval);
3835
3836 return retval;
3837 }
3838 }
3839
3840 return 0;
3841 }
3842
3843 void
3844 target_find_new_threads (void)
3845 {
3846 struct target_ops *t;
3847
3848 for (t = current_target.beneath; t != NULL; t = t->beneath)
3849 {
3850 if (t->to_find_new_threads != NULL)
3851 {
3852 t->to_find_new_threads (t);
3853 if (targetdebug)
3854 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3855
3856 return;
3857 }
3858 }
3859 }
3860
3861 void
3862 target_stop (ptid_t ptid)
3863 {
3864 if (!may_stop)
3865 {
3866 warning (_("May not interrupt or stop the target, ignoring attempt"));
3867 return;
3868 }
3869
3870 (*current_target.to_stop) (ptid);
3871 }
3872
3873 static void
3874 debug_to_post_attach (int pid)
3875 {
3876 debug_target.to_post_attach (pid);
3877
3878 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3879 }
3880
3881 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3882 result. The LIST incoming argument is released. */
3883
3884 static char *
3885 str_comma_list_concat_elem (char *list, const char *elem)
3886 {
3887 if (list == NULL)
3888 return xstrdup (elem);
3889 else
3890 return reconcat (list, list, ", ", elem, (char *) NULL);
3891 }
3892
3893 /* Helper for target_options_to_string. If OPT is present in
3894 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3895 Returns the new resulting string. OPT is removed from
3896 TARGET_OPTIONS. */
3897
3898 static char *
3899 do_option (int *target_options, char *ret,
3900 int opt, char *opt_str)
3901 {
3902 if ((*target_options & opt) != 0)
3903 {
3904 ret = str_comma_list_concat_elem (ret, opt_str);
3905 *target_options &= ~opt;
3906 }
3907
3908 return ret;
3909 }
3910
3911 char *
3912 target_options_to_string (int target_options)
3913 {
3914 char *ret = NULL;
3915
3916 #define DO_TARG_OPTION(OPT) \
3917 ret = do_option (&target_options, ret, OPT, #OPT)
3918
3919 DO_TARG_OPTION (TARGET_WNOHANG);
3920
3921 if (target_options != 0)
3922 ret = str_comma_list_concat_elem (ret, "unknown???");
3923
3924 if (ret == NULL)
3925 ret = xstrdup ("");
3926 return ret;
3927 }
3928
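/* Log to gdb_stdlog a register access made by FUNC: print REGNO's name
   (or its number), and for raw registers dump the bytes and the
   unsigned value held in REGCACHE.  */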
3929 static void
3930 debug_print_register (const char * func,
3931 struct regcache *regcache, int regno)
3932 {
3933 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3934
3935 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3936 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3937 && gdbarch_register_name (gdbarch, regno) != NULL
3938 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3939 fprintf_unfiltered (gdb_stdlog, "(%s)",
3940 gdbarch_register_name (gdbarch, regno));
3941 else
3942 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3943 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3944 {
3945 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3946 int i, size = register_size (gdbarch, regno);
3947 gdb_byte buf[MAX_REGISTER_SIZE];
3948
3949 regcache_raw_collect (regcache, regno, buf);
3950 fprintf_unfiltered (gdb_stdlog, " = ");
3951 for (i = 0; i < size; i++)
3952 {
3953 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3954 }
3955 if (size <= sizeof (LONGEST))
3956 {
3957 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3958
3959 fprintf_unfiltered (gdb_stdlog, " %s %s",
3960 core_addr_to_string_nz (val), plongest (val));
3961 }
3962 }
3963 fprintf_unfiltered (gdb_stdlog, "\n");
3964 }
3965
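/* Fetch register REGNO into REGCACHE from the first target beneath
   current_target that implements to_fetch_registers.  */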
3966 void
3967 target_fetch_registers (struct regcache *regcache, int regno)
3968 {
3969 struct target_ops *t;
3970
3971 for (t = current_target.beneath; t != NULL; t = t->beneath)
3972 {
3973 if (t->to_fetch_registers != NULL)
3974 {
3975 t->to_fetch_registers (t, regcache, regno);
3976 if (targetdebug)
3977 debug_print_register ("target_fetch_registers", regcache, regno);
3978 return;
3979 }
3980 }
3981 }
3982
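/* Write register REGNO from REGCACHE to the target, unless the
   "may-write-registers" permission is off.  Errors out if no target
   implements to_store_registers.  */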
3983 void
3984 target_store_registers (struct regcache *regcache, int regno)
3985 {
3986 struct target_ops *t;
3987
3988 if (!may_write_registers)
3989 error (_("Writing to registers is not allowed (regno %d)"), regno);
3990
3991 for (t = current_target.beneath; t != NULL; t = t->beneath)
3992 {
3993 if (t->to_store_registers != NULL)
3994 {
3995 t->to_store_registers (t, regcache, regno);
3996 if (targetdebug)
3997 {
3998 debug_print_register ("target_store_registers", regcache, regno);
3999 }
4000 return;
4001 }
4002 }
4003
4004 noprocess ();
4005 }
4006
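/* Return the physical core that thread PTID was last seen running on,
   or -1 if that information is not available.  */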
4007 int
4008 target_core_of_thread (ptid_t ptid)
4009 {
4010 struct target_ops *t;
4011
4012 for (t = current_target.beneath; t != NULL; t = t->beneath)
4013 {
4014 if (t->to_core_of_thread != NULL)
4015 {
4016 int retval = t->to_core_of_thread (t, ptid);
4017
4018 if (targetdebug)
4019 fprintf_unfiltered (gdb_stdlog,
4020 "target_core_of_thread (%d) = %d\n",
4021 ptid_get_pid (ptid), retval);
4022 return retval;
4023 }
4024 }
4025
4026 return -1;
4027 }
4028
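/* Verify that SIZE bytes of target memory starting at MEMADDR match
   the contents of DATA, asking the first target that implements
   to_verify_memory; complains if none does.  */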
4029 int
4030 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4031 {
4032 struct target_ops *t;
4033
4034 for (t = current_target.beneath; t != NULL; t = t->beneath)
4035 {
4036 if (t->to_verify_memory != NULL)
4037 {
4038 int retval = t->to_verify_memory (t, data, memaddr, size);
4039
4040 if (targetdebug)
4041 fprintf_unfiltered (gdb_stdlog,
4042 "target_verify_memory (%s, %s) = %d\n",
4043 paddress (target_gdbarch (), memaddr),
4044 pulongest (size),
4045 retval);
4046 return retval;
4047 }
4048 }
4049
4050 tcomplain ();
4051 }
4052
4053 /* The documentation for this function is in its prototype declaration in
4054 target.h. */
4055
4056 int
4057 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4058 {
4059 struct target_ops *t;
4060
4061 for (t = current_target.beneath; t != NULL; t = t->beneath)
4062 if (t->to_insert_mask_watchpoint != NULL)
4063 {
4064 int ret;
4065
4066 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4067
4068 if (targetdebug)
4069 fprintf_unfiltered (gdb_stdlog, "\
4070 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4071 core_addr_to_string (addr),
4072 core_addr_to_string (mask), rw, ret);
4073
4074 return ret;
4075 }
4076
4077 return 1;
4078 }
4079
4080 /* The documentation for this function is in its prototype declaration in
4081 target.h. */
4082
4083 int
4084 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4085 {
4086 struct target_ops *t;
4087
4088 for (t = current_target.beneath; t != NULL; t = t->beneath)
4089 if (t->to_remove_mask_watchpoint != NULL)
4090 {
4091 int ret;
4092
4093 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4094
4095 if (targetdebug)
4096 fprintf_unfiltered (gdb_stdlog, "\
4097 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4098 core_addr_to_string (addr),
4099 core_addr_to_string (mask), rw, ret);
4100
4101 return ret;
4102 }
4103
4104 return 1;
4105 }
4106
4107 /* The documentation for this function is in its prototype declaration
4108 in target.h. */
4109
4110 int
4111 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4112 {
4113 struct target_ops *t;
4114
4115 for (t = current_target.beneath; t != NULL; t = t->beneath)
4116 if (t->to_masked_watch_num_registers != NULL)
4117 return t->to_masked_watch_num_registers (t, addr, mask);
4118
4119 return -1;
4120 }
4121
4122 /* The documentation for this function is in its prototype declaration
4123 in target.h. */
4124
4125 int
4126 target_ranged_break_num_registers (void)
4127 {
4128 struct target_ops *t;
4129
4130 for (t = current_target.beneath; t != NULL; t = t->beneath)
4131 if (t->to_ranged_break_num_registers != NULL)
4132 return t->to_ranged_break_num_registers (t);
4133
4134 return -1;
4135 }
4136
4137 /* See target.h. */
4138
4139 int
4140 target_supports_btrace (void)
4141 {
4142 struct target_ops *t;
4143
4144 for (t = current_target.beneath; t != NULL; t = t->beneath)
4145 if (t->to_supports_btrace != NULL)
4146 return t->to_supports_btrace ();
4147
4148 return 0;
4149 }
4150
4151 /* See target.h. */
4152
4153 struct btrace_target_info *
4154 target_enable_btrace (ptid_t ptid)
4155 {
4156 struct target_ops *t;
4157
4158 for (t = current_target.beneath; t != NULL; t = t->beneath)
4159 if (t->to_enable_btrace != NULL)
4160 return t->to_enable_btrace (ptid);
4161
4162 tcomplain ();
4163 return NULL;
4164 }
4165
4166 /* See target.h. */
4167
4168 void
4169 target_disable_btrace (struct btrace_target_info *btinfo)
4170 {
4171 struct target_ops *t;
4172
4173 for (t = current_target.beneath; t != NULL; t = t->beneath)
4174 if (t->to_disable_btrace != NULL)
4175 {
4176 t->to_disable_btrace (btinfo);
4177 return;
4178 }
4179
4180 tcomplain ();
4181 }
4182
4183 /* See target.h. */
4184
4185 void
4186 target_teardown_btrace (struct btrace_target_info *btinfo)
4187 {
4188 struct target_ops *t;
4189
4190 for (t = current_target.beneath; t != NULL; t = t->beneath)
4191 if (t->to_teardown_btrace != NULL)
4192 {
4193 t->to_teardown_btrace (btinfo);
4194 return;
4195 }
4196
4197 tcomplain ();
4198 }
4199
4200 /* See target.h. */
4201
4202 VEC (btrace_block_s) *
4203 target_read_btrace (struct btrace_target_info *btinfo,
4204 enum btrace_read_type type)
4205 {
4206 struct target_ops *t;
4207
4208 for (t = current_target.beneath; t != NULL; t = t->beneath)
4209 if (t->to_read_btrace != NULL)
4210 return t->to_read_btrace (btinfo, type);
4211
4212 tcomplain ();
4213 return NULL;
4214 }
4215
4216 /* See target.h. */
4217
4218 void
4219 target_stop_recording (void)
4220 {
4221 struct target_ops *t;
4222
4223 for (t = current_target.beneath; t != NULL; t = t->beneath)
4224 if (t->to_stop_recording != NULL)
4225 {
4226 t->to_stop_recording ();
4227 return;
4228 }
4229
4230 /* This is optional. */
4231 }
4232
4233 /* See target.h. */
4234
4235 void
4236 target_info_record (void)
4237 {
4238 struct target_ops *t;
4239
4240 for (t = current_target.beneath; t != NULL; t = t->beneath)
4241 if (t->to_info_record != NULL)
4242 {
4243 t->to_info_record ();
4244 return;
4245 }
4246
4247 tcomplain ();
4248 }
4249
4250 /* See target.h. */
4251
4252 void
4253 target_save_record (const char *filename)
4254 {
4255 struct target_ops *t;
4256
4257 for (t = current_target.beneath; t != NULL; t = t->beneath)
4258 if (t->to_save_record != NULL)
4259 {
4260 t->to_save_record (filename);
4261 return;
4262 }
4263
4264 tcomplain ();
4265 }
4266
4267 /* See target.h. */
4268
4269 int
4270 target_supports_delete_record (void)
4271 {
4272 struct target_ops *t;
4273
4274 for (t = current_target.beneath; t != NULL; t = t->beneath)
4275 if (t->to_delete_record != NULL)
4276 return 1;
4277
4278 return 0;
4279 }
4280
4281 /* See target.h. */
4282
4283 void
4284 target_delete_record (void)
4285 {
4286 struct target_ops *t;
4287
4288 for (t = current_target.beneath; t != NULL; t = t->beneath)
4289 if (t->to_delete_record != NULL)
4290 {
4291 t->to_delete_record ();
4292 return;
4293 }
4294
4295 tcomplain ();
4296 }
4297
4298 /* See target.h. */
4299
4300 int
4301 target_record_is_replaying (void)
4302 {
4303 struct target_ops *t;
4304
4305 for (t = current_target.beneath; t != NULL; t = t->beneath)
4306 if (t->to_record_is_replaying != NULL)
4307 return t->to_record_is_replaying ();
4308
4309 return 0;
4310 }
4311
4312 /* See target.h. */
4313
4314 void
4315 target_goto_record_begin (void)
4316 {
4317 struct target_ops *t;
4318
4319 for (t = current_target.beneath; t != NULL; t = t->beneath)
4320 if (t->to_goto_record_begin != NULL)
4321 {
4322 t->to_goto_record_begin ();
4323 return;
4324 }
4325
4326 tcomplain ();
4327 }
4328
4329 /* See target.h. */
4330
4331 void
4332 target_goto_record_end (void)
4333 {
4334 struct target_ops *t;
4335
4336 for (t = current_target.beneath; t != NULL; t = t->beneath)
4337 if (t->to_goto_record_end != NULL)
4338 {
4339 t->to_goto_record_end ();
4340 return;
4341 }
4342
4343 tcomplain ();
4344 }
4345
4346 /* See target.h. */
4347
4348 void
4349 target_goto_record (ULONGEST insn)
4350 {
4351 struct target_ops *t;
4352
4353 for (t = current_target.beneath; t != NULL; t = t->beneath)
4354 if (t->to_goto_record != NULL)
4355 {
4356 t->to_goto_record (insn);
4357 return;
4358 }
4359
4360 tcomplain ();
4361 }
4362
4363 /* See target.h. */
4364
4365 void
4366 target_insn_history (int size, int flags)
4367 {
4368 struct target_ops *t;
4369
4370 for (t = current_target.beneath; t != NULL; t = t->beneath)
4371 if (t->to_insn_history != NULL)
4372 {
4373 t->to_insn_history (size, flags);
4374 return;
4375 }
4376
4377 tcomplain ();
4378 }
4379
4380 /* See target.h. */
4381
4382 void
4383 target_insn_history_from (ULONGEST from, int size, int flags)
4384 {
4385 struct target_ops *t;
4386
4387 for (t = current_target.beneath; t != NULL; t = t->beneath)
4388 if (t->to_insn_history_from != NULL)
4389 {
4390 t->to_insn_history_from (from, size, flags);
4391 return;
4392 }
4393
4394 tcomplain ();
4395 }
4396
4397 /* See target.h. */
4398
4399 void
4400 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4401 {
4402 struct target_ops *t;
4403
4404 for (t = current_target.beneath; t != NULL; t = t->beneath)
4405 if (t->to_insn_history_range != NULL)
4406 {
4407 t->to_insn_history_range (begin, end, flags);
4408 return;
4409 }
4410
4411 tcomplain ();
4412 }
4413
4414 /* See target.h. */
4415
4416 void
4417 target_call_history (int size, int flags)
4418 {
4419 struct target_ops *t;
4420
4421 for (t = current_target.beneath; t != NULL; t = t->beneath)
4422 if (t->to_call_history != NULL)
4423 {
4424 t->to_call_history (size, flags);
4425 return;
4426 }
4427
4428 tcomplain ();
4429 }
4430
4431 /* See target.h. */
4432
4433 void
4434 target_call_history_from (ULONGEST begin, int size, int flags)
4435 {
4436 struct target_ops *t;
4437
4438 for (t = current_target.beneath; t != NULL; t = t->beneath)
4439 if (t->to_call_history_from != NULL)
4440 {
4441 t->to_call_history_from (begin, size, flags);
4442 return;
4443 }
4444
4445 tcomplain ();
4446 }
4447
4448 /* See target.h. */
4449
4450 void
4451 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4452 {
4453 struct target_ops *t;
4454
4455 for (t = current_target.beneath; t != NULL; t = t->beneath)
4456 if (t->to_call_history_range != NULL)
4457 {
4458 t->to_call_history_range (begin, end, flags);
4459 return;
4460 }
4461
4462 tcomplain ();
4463 }
4464
4465 static void
4466 debug_to_prepare_to_store (struct regcache *regcache)
4467 {
4468 debug_target.to_prepare_to_store (regcache);
4469
4470 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4471 }
4472
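/* Forward a deprecated_xfer_memory request to debug_target and log the
   transfer to gdb_stdlog, including a dump of the transferred bytes
   (abbreviated unless target debugging is set above 1).  */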
4473 static int
4474 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4475 int write, struct mem_attrib *attrib,
4476 struct target_ops *target)
4477 {
4478 int retval;
4479
4480 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4481 attrib, target);
4482
4483 fprintf_unfiltered (gdb_stdlog,
4484 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4485 paddress (target_gdbarch (), memaddr), len,
4486 write ? "write" : "read", retval);
4487
4488 if (retval > 0)
4489 {
4490 int i;
4491
4492 fputs_unfiltered (", bytes =", gdb_stdlog);
4493 for (i = 0; i < retval; i++)
4494 {
4495 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4496 {
4497 if (targetdebug < 2 && i > 0)
4498 {
4499 fprintf_unfiltered (gdb_stdlog, " ...");
4500 break;
4501 }
4502 fprintf_unfiltered (gdb_stdlog, "\n");
4503 }
4504
4505 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4506 }
4507 }
4508
4509 fputc_unfiltered ('\n', gdb_stdlog);
4510
4511 return retval;
4512 }
4513
4514 static void
4515 debug_to_files_info (struct target_ops *target)
4516 {
4517 debug_target.to_files_info (target);
4518
4519 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4520 }
4521
4522 static int
4523 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4524 struct bp_target_info *bp_tgt)
4525 {
4526 int retval;
4527
4528 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4529
4530 fprintf_unfiltered (gdb_stdlog,
4531 "target_insert_breakpoint (%s, xxx) = %ld\n",
4532 core_addr_to_string (bp_tgt->placed_address),
4533 (unsigned long) retval);
4534 return retval;
4535 }
4536
4537 static int
4538 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4539 struct bp_target_info *bp_tgt)
4540 {
4541 int retval;
4542
4543 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4544
4545 fprintf_unfiltered (gdb_stdlog,
4546 "target_remove_breakpoint (%s, xxx) = %ld\n",
4547 core_addr_to_string (bp_tgt->placed_address),
4548 (unsigned long) retval);
4549 return retval;
4550 }
4551
4552 static int
4553 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4554 {
4555 int retval;
4556
4557 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4558
4559 fprintf_unfiltered (gdb_stdlog,
4560 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4561 (unsigned long) type,
4562 (unsigned long) cnt,
4563 (unsigned long) from_tty,
4564 (unsigned long) retval);
4565 return retval;
4566 }
4567
4568 static int
4569 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4570 {
4571 int retval;
4572
4573 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4574
4575 fprintf_unfiltered (gdb_stdlog,
4576 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4577 core_addr_to_string (addr), (unsigned long) len,
4578 core_addr_to_string (retval));
4579 return retval;
4580 }
4581
4582 static int
4583 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4584 struct expression *cond)
4585 {
4586 int retval;
4587
4588 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4589 rw, cond);
4590
4591 fprintf_unfiltered (gdb_stdlog,
4592 "target_can_accel_watchpoint_condition "
4593 "(%s, %d, %d, %s) = %ld\n",
4594 core_addr_to_string (addr), len, rw,
4595 host_address_to_string (cond), (unsigned long) retval);
4596 return retval;
4597 }
4598
4599 static int
4600 debug_to_stopped_by_watchpoint (void)
4601 {
4602 int retval;
4603
4604 retval = debug_target.to_stopped_by_watchpoint ();
4605
4606 fprintf_unfiltered (gdb_stdlog,
4607 "target_stopped_by_watchpoint () = %ld\n",
4608 (unsigned long) retval);
4609 return retval;
4610 }
4611
4612 static int
4613 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4614 {
4615 int retval;
4616
4617 retval = debug_target.to_stopped_data_address (target, addr);
4618
4619 fprintf_unfiltered (gdb_stdlog,
4620 "target_stopped_data_address ([%s]) = %ld\n",
4621 core_addr_to_string (*addr),
4622 (unsigned long)retval);
4623 return retval;
4624 }
4625
4626 static int
4627 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4628 CORE_ADDR addr,
4629 CORE_ADDR start, int length)
4630 {
4631 int retval;
4632
4633 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4634 start, length);
4635
4636 fprintf_unfiltered (gdb_stdlog,
4637 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4638 core_addr_to_string (addr), core_addr_to_string (start),
4639 length, retval);
4640 return retval;
4641 }
4642
4643 static int
4644 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4645 struct bp_target_info *bp_tgt)
4646 {
4647 int retval;
4648
4649 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4650
4651 fprintf_unfiltered (gdb_stdlog,
4652 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4653 core_addr_to_string (bp_tgt->placed_address),
4654 (unsigned long) retval);
4655 return retval;
4656 }
4657
4658 static int
4659 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4660 struct bp_target_info *bp_tgt)
4661 {
4662 int retval;
4663
4664 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4665
4666 fprintf_unfiltered (gdb_stdlog,
4667 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4668 core_addr_to_string (bp_tgt->placed_address),
4669 (unsigned long) retval);
4670 return retval;
4671 }
4672
4673 static int
4674 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4675 struct expression *cond)
4676 {
4677 int retval;
4678
4679 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4680
4681 fprintf_unfiltered (gdb_stdlog,
4682 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4683 core_addr_to_string (addr), len, type,
4684 host_address_to_string (cond), (unsigned long) retval);
4685 return retval;
4686 }
4687
4688 static int
4689 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4690 struct expression *cond)
4691 {
4692 int retval;
4693
4694 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4695
4696 fprintf_unfiltered (gdb_stdlog,
4697 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4698 core_addr_to_string (addr), len, type,
4699 host_address_to_string (cond), (unsigned long) retval);
4700 return retval;
4701 }
4702
4703 static void
4704 debug_to_terminal_init (void)
4705 {
4706 debug_target.to_terminal_init ();
4707
4708 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4709 }
4710
4711 static void
4712 debug_to_terminal_inferior (void)
4713 {
4714 debug_target.to_terminal_inferior ();
4715
4716 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4717 }
4718
4719 static void
4720 debug_to_terminal_ours_for_output (void)
4721 {
4722 debug_target.to_terminal_ours_for_output ();
4723
4724 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4725 }
4726
4727 static void
4728 debug_to_terminal_ours (void)
4729 {
4730 debug_target.to_terminal_ours ();
4731
4732 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4733 }
4734
4735 static void
4736 debug_to_terminal_save_ours (void)
4737 {
4738 debug_target.to_terminal_save_ours ();
4739
4740 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4741 }
4742
4743 static void
4744 debug_to_terminal_info (const char *arg, int from_tty)
4745 {
4746 debug_target.to_terminal_info (arg, from_tty);
4747
4748 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4749 from_tty);
4750 }
4751
4752 static void
4753 debug_to_load (char *args, int from_tty)
4754 {
4755 debug_target.to_load (args, from_tty);
4756
4757 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4758 }
4759
4760 static void
4761 debug_to_post_startup_inferior (ptid_t ptid)
4762 {
4763 debug_target.to_post_startup_inferior (ptid);
4764
4765 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4766 ptid_get_pid (ptid));
4767 }
4768
4769 static int
4770 debug_to_insert_fork_catchpoint (int pid)
4771 {
4772 int retval;
4773
4774 retval = debug_target.to_insert_fork_catchpoint (pid);
4775
4776 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4777 pid, retval);
4778
4779 return retval;
4780 }
4781
4782 static int
4783 debug_to_remove_fork_catchpoint (int pid)
4784 {
4785 int retval;
4786
4787 retval = debug_target.to_remove_fork_catchpoint (pid);
4788
4789 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4790 pid, retval);
4791
4792 return retval;
4793 }
4794
4795 static int
4796 debug_to_insert_vfork_catchpoint (int pid)
4797 {
4798 int retval;
4799
4800 retval = debug_target.to_insert_vfork_catchpoint (pid);
4801
4802 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4803 pid, retval);
4804
4805 return retval;
4806 }
4807
4808 static int
4809 debug_to_remove_vfork_catchpoint (int pid)
4810 {
4811 int retval;
4812
4813 retval = debug_target.to_remove_vfork_catchpoint (pid);
4814
4815 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4816 pid, retval);
4817
4818 return retval;
4819 }
4820
4821 static int
4822 debug_to_insert_exec_catchpoint (int pid)
4823 {
4824 int retval;
4825
4826 retval = debug_target.to_insert_exec_catchpoint (pid);
4827
4828 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4829 pid, retval);
4830
4831 return retval;
4832 }
4833
4834 static int
4835 debug_to_remove_exec_catchpoint (int pid)
4836 {
4837 int retval;
4838
4839 retval = debug_target.to_remove_exec_catchpoint (pid);
4840
4841 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4842 pid, retval);
4843
4844 return retval;
4845 }
4846
4847 static int
4848 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4849 {
4850 int has_exited;
4851
4852 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4853
4854 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4855 pid, wait_status, *exit_status, has_exited);
4856
4857 return has_exited;
4858 }
4859
4860 static int
4861 debug_to_can_run (void)
4862 {
4863 int retval;
4864
4865 retval = debug_target.to_can_run ();
4866
4867 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4868
4869 return retval;
4870 }
4871
4872 static struct gdbarch *
4873 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4874 {
4875 struct gdbarch *retval;
4876
4877 retval = debug_target.to_thread_architecture (ops, ptid);
4878
4879 fprintf_unfiltered (gdb_stdlog,
4880 "target_thread_architecture (%s) = %s [%s]\n",
4881 target_pid_to_str (ptid),
4882 host_address_to_string (retval),
4883 gdbarch_bfd_arch_info (retval)->printable_name);
4884 return retval;
4885 }
4886
4887 static void
4888 debug_to_stop (ptid_t ptid)
4889 {
4890 debug_target.to_stop (ptid);
4891
4892 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4893 target_pid_to_str (ptid));
4894 }
4895
4896 static void
4897 debug_to_rcmd (char *command,
4898 struct ui_file *outbuf)
4899 {
4900 debug_target.to_rcmd (command, outbuf);
4901 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4902 }
4903
4904 static char *
4905 debug_to_pid_to_exec_file (int pid)
4906 {
4907 char *exec_file;
4908
4909 exec_file = debug_target.to_pid_to_exec_file (pid);
4910
4911 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4912 pid, exec_file);
4913
4914 return exec_file;
4915 }
4916
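/* Save the current target vector in debug_target and install the
   debug_to_* logging wrappers into current_target.  */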
4917 static void
4918 setup_target_debug (void)
4919 {
4920 memcpy (&debug_target, &current_target, sizeof debug_target);
4921
4922 current_target.to_open = debug_to_open;
4923 current_target.to_post_attach = debug_to_post_attach;
4924 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4925 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4926 current_target.to_files_info = debug_to_files_info;
4927 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4928 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4929 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4930 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4931 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4932 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4933 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4934 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4935 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4936 current_target.to_watchpoint_addr_within_range
4937 = debug_to_watchpoint_addr_within_range;
4938 current_target.to_region_ok_for_hw_watchpoint
4939 = debug_to_region_ok_for_hw_watchpoint;
4940 current_target.to_can_accel_watchpoint_condition
4941 = debug_to_can_accel_watchpoint_condition;
4942 current_target.to_terminal_init = debug_to_terminal_init;
4943 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4944 current_target.to_terminal_ours_for_output
4945 = debug_to_terminal_ours_for_output;
4946 current_target.to_terminal_ours = debug_to_terminal_ours;
4947 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4948 current_target.to_terminal_info = debug_to_terminal_info;
4949 current_target.to_load = debug_to_load;
4950 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4951 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4952 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4953 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4954 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4955 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4956 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4957 current_target.to_has_exited = debug_to_has_exited;
4958 current_target.to_can_run = debug_to_can_run;
4959 current_target.to_stop = debug_to_stop;
4960 current_target.to_rcmd = debug_to_rcmd;
4961 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4962 current_target.to_thread_architecture = debug_to_thread_architecture;
4963 }
4964 \f
4965
4966 static char targ_desc[] =
4967 "Names of targets and files being debugged.\nShows the entire \
4968 stack of targets currently in use (including the exec-file,\n\
4969 core-file, and process, if any), as well as the symbol file name.";
4970
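/* Implement the "monitor" command, passing CMD to the current target's
   to_rcmd method; error out if the target does not support it.  */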
4971 static void
4972 do_monitor_command (char *cmd,
4973 int from_tty)
4974 {
4975 if ((current_target.to_rcmd
4976 == (void (*) (char *, struct ui_file *)) tcomplain)
4977 || (current_target.to_rcmd == debug_to_rcmd
4978 && (debug_target.to_rcmd
4979 == (void (*) (char *, struct ui_file *)) tcomplain)))
4980 error (_("\"monitor\" command not supported by this target."));
4981 target_rcmd (cmd, gdb_stdtarg);
4982 }
4983
4984 /* Print the name of each layer of our target stack.  */
4985
4986 static void
4987 maintenance_print_target_stack (char *cmd, int from_tty)
4988 {
4989 struct target_ops *t;
4990
4991 printf_filtered (_("The current target stack is:\n"));
4992
4993 for (t = target_stack; t != NULL; t = t->beneath)
4994 {
4995 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4996 }
4997 }
4998
4999 /* Controls whether async mode is permitted.  */
5000 int target_async_permitted = 0;
5001
5002 /* The set command writes to this variable. If the inferior is
5003 executing, target_async_permitted is *not* updated. */
5004 static int target_async_permitted_1 = 0;
5005
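/* Callback for "set target-async": refuse to change the setting while
   there are live inferiors, otherwise commit the user-supplied value to
   target_async_permitted.  */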
5006 static void
5007 set_target_async_command (char *args, int from_tty,
5008 struct cmd_list_element *c)
5009 {
5010 if (have_live_inferiors ())
5011 {
5012 target_async_permitted_1 = target_async_permitted;
5013 error (_("Cannot change this setting while the inferior is running."));
5014 }
5015
5016 target_async_permitted = target_async_permitted_1;
5017 }
5018
5019 static void
5020 show_target_async_command (struct ui_file *file, int from_tty,
5021 struct cmd_list_element *c,
5022 const char *value)
5023 {
5024 fprintf_filtered (file,
5025 _("Controlling the inferior in "
5026 "asynchronous mode is %s.\n"), value);
5027 }
5028
5029 /* Temporary copies of permission settings. */
5030
5031 static int may_write_registers_1 = 1;
5032 static int may_write_memory_1 = 1;
5033 static int may_insert_breakpoints_1 = 1;
5034 static int may_insert_tracepoints_1 = 1;
5035 static int may_insert_fast_tracepoints_1 = 1;
5036 static int may_stop_1 = 1;
5037
5038 /* Make the user-set values match the real values again. */
5039
5040 void
5041 update_target_permissions (void)
5042 {
5043 may_write_registers_1 = may_write_registers;
5044 may_write_memory_1 = may_write_memory;
5045 may_insert_breakpoints_1 = may_insert_breakpoints;
5046 may_insert_tracepoints_1 = may_insert_tracepoints;
5047 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5048 may_stop_1 = may_stop;
5049 }
5050
5051 /* This one function handles (most of) the permission flags in the
5052 same way.  */
5053
5054 static void
5055 set_target_permissions (char *args, int from_tty,
5056 struct cmd_list_element *c)
5057 {
5058 if (target_has_execution)
5059 {
5060 update_target_permissions ();
5061 error (_("Cannot change this setting while the inferior is running."));
5062 }
5063
5064 /* Make the real values match the user-changed values. */
5065 may_write_registers = may_write_registers_1;
5066 may_insert_breakpoints = may_insert_breakpoints_1;
5067 may_insert_tracepoints = may_insert_tracepoints_1;
5068 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5069 may_stop = may_stop_1;
5070 update_observer_mode ();
5071 }
5072
5073 /* Set memory write permission independently of observer mode. */
5074
5075 static void
5076 set_write_memory_permission (char *args, int from_tty,
5077 struct cmd_list_element *c)
5078 {
5079 /* Make the real values match the user-changed values. */
5080 may_write_memory = may_write_memory_1;
5081 update_observer_mode ();
5082 }
5083
5084
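/* Module initialization: push the initial dummy target and register
   the target-related commands and settings.  */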
5085 void
5086 initialize_targets (void)
5087 {
5088 init_dummy_target ();
5089 push_target (&dummy_target);
5090
5091 add_info ("target", target_info, targ_desc);
5092 add_info ("files", target_info, targ_desc);
5093
5094 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5095 Set target debugging."), _("\
5096 Show target debugging."), _("\
5097 When non-zero, target debugging is enabled. Higher numbers are more\n\
5098 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5099 command."),
5100 NULL,
5101 show_targetdebug,
5102 &setdebuglist, &showdebuglist);
5103
5104 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5105 &trust_readonly, _("\
5106 Set mode for reading from readonly sections."), _("\
5107 Show mode for reading from readonly sections."), _("\
5108 When this mode is on, memory reads from readonly sections (such as .text)\n\
5109 will be read from the object file instead of from the target. This will\n\
5110 result in significant performance improvement for remote targets."),
5111 NULL,
5112 show_trust_readonly,
5113 &setlist, &showlist);
5114
5115 add_com ("monitor", class_obscure, do_monitor_command,
5116 _("Send a command to the remote monitor (remote targets only)."));
5117
5118 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5119 _("Print the name of each layer of the internal target stack."),
5120 &maintenanceprintlist);
5121
5122 add_setshow_boolean_cmd ("target-async", no_class,
5123 &target_async_permitted_1, _("\
5124 Set whether gdb controls the inferior in asynchronous mode."), _("\
5125 Show whether gdb controls the inferior in asynchronous mode."), _("\
5126 Tells gdb whether to control the inferior in asynchronous mode."),
5127 set_target_async_command,
5128 show_target_async_command,
5129 &setlist,
5130 &showlist);
5131
5132 add_setshow_boolean_cmd ("may-write-registers", class_support,
5133 &may_write_registers_1, _("\
5134 Set permission to write into registers."), _("\
5135 Show permission to write into registers."), _("\
5136 When this permission is on, GDB may write into the target's registers.\n\
5137 Otherwise, any sort of write attempt will result in an error."),
5138 set_target_permissions, NULL,
5139 &setlist, &showlist);
5140
5141 add_setshow_boolean_cmd ("may-write-memory", class_support,
5142 &may_write_memory_1, _("\
5143 Set permission to write into target memory."), _("\
5144 Show permission to write into target memory."), _("\
5145 When this permission is on, GDB may write into the target's memory.\n\
5146 Otherwise, any sort of write attempt will result in an error."),
5147 set_write_memory_permission, NULL,
5148 &setlist, &showlist);
5149
5150 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5151 &may_insert_breakpoints_1, _("\
5152 Set permission to insert breakpoints in the target."), _("\
5153 Show permission to insert breakpoints in the target."), _("\
5154 When this permission is on, GDB may insert breakpoints in the program.\n\
5155 Otherwise, any sort of insertion attempt will result in an error."),
5156 set_target_permissions, NULL,
5157 &setlist, &showlist);
5158
5159 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5160 &may_insert_tracepoints_1, _("\
5161 Set permission to insert tracepoints in the target."), _("\
5162 Show permission to insert tracepoints in the target."), _("\
5163 When this permission is on, GDB may insert tracepoints in the program.\n\
5164 Otherwise, any sort of insertion attempt will result in an error."),
5165 set_target_permissions, NULL,
5166 &setlist, &showlist);
5167
5168 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5169 &may_insert_fast_tracepoints_1, _("\
5170 Set permission to insert fast tracepoints in the target."), _("\
5171 Show permission to insert fast tracepoints in the target."), _("\
5172 When this permission is on, GDB may insert fast tracepoints.\n\
5173 Otherwise, any sort of insertion attempt will result in an error."),
5174 set_target_permissions, NULL,
5175 &setlist, &showlist);
5176
5177 add_setshow_boolean_cmd ("may-interrupt", class_support,
5178 &may_stop_1, _("\
5179 Set permission to interrupt or signal the target."), _("\
5180 Show permission to interrupt or signal the target."), _("\
5181 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5182 Otherwise, any attempt to interrupt or stop will be ignored."),
5183 set_target_permissions, NULL,
5184 &setlist, &showlist);
5185 }