gdb:
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdb_wait.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_terminal_info (char *, int);
146
147 static void debug_to_load (char *, int);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_stop (ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_index;
159 unsigned target_struct_allocsize;
160 #define DEFAULT_ALLOCSIZE 10
161
162 /* The initial current target, so that there is always a semi-valid
163 current target. */
164
165 static struct target_ops dummy_target;
166
167 /* Top of target stack. */
168
169 static struct target_ops *target_stack;
170
171 /* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174 struct target_ops current_target;
175
176 /* Command list for target. */
177
178 static struct cmd_list_element *targetlist = NULL;
179
180 /* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183 static int trust_readonly = 0;
184
185 /* Nonzero if we should show true memory content including
186 memory breakpoint inserted by gdb. */
187
188 static int show_memory_breakpoints = 0;
189
190 /* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192 inadvertant disruption, such as in non-stop mode. */
193
194 int may_write_registers = 1;
195
196 int may_write_memory = 1;
197
198 int may_insert_breakpoints = 1;
199
200 int may_insert_tracepoints = 1;
201
202 int may_insert_fast_tracepoints = 1;
203
204 int may_stop = 1;
205
206 /* Non-zero if we want to see trace of target level stuff. */
207
208 static int targetdebug = 0;
/* Callback for "show debug target": report the current value of the
   "debug target" setting.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
215
216 static void setup_target_debug (void);
217
218 /* The option sets this. */
219 static int stack_cache_enabled_p_1 = 1;
220 /* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223 static int stack_cache_enabled_p = 1;
224
225 /* This is called *after* the stack-cache has been set.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230 static void
231 set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233 {
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238 }
239
/* Callback for "show stack-cache": report whether the stack cache is
   in use.  */
static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
246
247 /* Cache of memory operations, to speed up remote access. */
248 static DCACHE *target_dcache;
249
250 /* Invalidate the target dcache. */
251
void
target_dcache_invalidate (void)
{
  /* Drop every cached memory block; the next access re-reads from the
     target.  */
  dcache_invalidate (target_dcache);
}
257
258 /* The user just typed 'target' without the name of a target. */
259
/* Implement the bare "target" command: the user typed "target" with
   no target name, so just print usage guidance.  */
static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
266
267 /* Default target_has_* methods for process_stratum targets. */
268
269 int
270 default_child_has_all_memory (struct target_ops *ops)
271 {
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_memory (struct target_ops *ops)
281 {
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289 int
290 default_child_has_stack (struct target_ops *ops)
291 {
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297 }
298
299 int
300 default_child_has_registers (struct target_ops *ops)
301 {
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307 }
308
309 int
310 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311 {
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318 }
319
320
321 int
322 target_has_all_memory_1 (void)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331 }
332
333 int
334 target_has_memory_1 (void)
335 {
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343 }
344
345 int
346 target_has_stack_1 (void)
347 {
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355 }
356
357 int
358 target_has_registers_1 (void)
359 {
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367 }
368
369 int
370 target_has_execution_1 (ptid_t the_ptid)
371 {
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379 }
380
int
target_has_execution_current (void)
{
  /* Convenience wrapper: check execution for the current thread.  */
  return target_has_execution_1 (inferior_ptid);
}
386
387 /* Add a possible target architecture to the list. */
388
/* Register target vector T as a selectable target, filling in default
   implementations for the "must have" methods and hooking T's open
   routine up as a "target <shortname>" subcommand.  */
void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Append T to the global registry, growing it geometrically.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* Create the "target" prefix command lazily, on the first
     registration.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
436
437 /* Stub functions */
438
/* A do-nothing stub, used as the default for target methods whose
   absence may safely be ignored.  */
void
target_ignore (void)
{
}
443
444 void
445 target_kill (void)
446 {
447 struct target_ops *t;
448
449 for (t = current_target.beneath; t != NULL; t = t->beneath)
450 if (t->to_kill != NULL)
451 {
452 if (targetdebug)
453 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
454
455 t->to_kill (t);
456 return;
457 }
458
459 noprocess ();
460 }
461
void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so any cached data is stale.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
468
469 void
470 target_create_inferior (char *exec_file, char *args,
471 char **env, int from_tty)
472 {
473 struct target_ops *t;
474
475 for (t = current_target.beneath; t != NULL; t = t->beneath)
476 {
477 if (t->to_create_inferior != NULL)
478 {
479 t->to_create_inferior (t, exec_file, args, env, from_tty);
480 if (targetdebug)
481 fprintf_unfiltered (gdb_stdlog,
482 "target_create_inferior (%s, %s, xxx, %d)\n",
483 exec_file, args, from_tty);
484 return;
485 }
486 }
487
488 internal_error (__FILE__, __LINE__,
489 _("could not find a target to create inferior"));
490 }
491
/* Hand the terminal over to the inferior before resuming it in the
   foreground.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal. Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet. However, if sync_execution
     is not set, we know it will become async prior to resume. */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes. */
  (*current_target.to_terminal_inferior) ();
}
506
/* Default memory-transfer method that always fails: sets errno to EIO
   and reports zero bytes transferred.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location. */
  return 0;			/* No bytes handled. */
}
514
/* Error out, naming the current target, for operations the target
   does not support.  Does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
521
/* Error out for operations that require a live process.  Does not
   return.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
527
/* Default to_terminal_info method: there is no saved terminal state to
   report.  */
static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
533
534 /* A default implementation for the to_get_ada_task_ptid target method.
535
536 This function builds the PTID by using both LWP and TID as part of
537 the PTID lwp and tid elements. The pid used is the pid of the
538 inferior_ptid. */
539
/* Default to_get_ada_task_ptid method; see the comment above.  */
static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  /* Reuse the current inferior's pid, combined with the task's LWP
     and TID.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
545
546 static enum exec_direction_kind
547 default_execution_direction (void)
548 {
549 if (!target_can_execute_reverse)
550 return EXEC_FORWARD;
551 else if (!target_can_async_p ())
552 return EXEC_FORWARD;
553 else
554 gdb_assert_not_reached ("\
555 to_execution_direction must be implemented for reverse async");
556 }
557
558 /* Go through the target stack from top to bottom, copying over zero
559 entries in current_target, then filling in still empty entries. In
560 effect, we are doing class inheritance through the pushed target
561 vectors.
562
563 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
564 is currently implemented, is that it discards any knowledge of
565 which target an inherited method originally belonged to.
566 Consequently, new new target methods should instead explicitly and
567 locally search the target stack for the target that can handle the
568 request. */
569
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents. */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET into current_target, but only if no target
     higher on the stack has already supplied it -- i.e. the topmost
     provider of a method wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open. */
      /* Do not inherit to_close. */
      /* Do not inherit to_attach. */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach. */
      /* Do not inherit to_disconnect. */
      /* Do not inherit to_resume. */
      /* Do not inherit to_wait. */
      /* Do not inherit to_fetch_registers. */
      /* Do not inherit to_store_registers. */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers. */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint. */
      /* Do not inherit to_remove_mask_watchpoint. */
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers. */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill. */
      INHERIT (to_load, t);
      /* Do no inherit to_create_inferior. */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork. */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior. */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals. */
      /* Do not inherit to_thread_alive. */
      /* Do not inherit to_find_new_threads. */
      /* Do not inherit to_pid_to_str. */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial. */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory. */
      /* Do not inherit to_has_memory. */
      /* Do not inherit to_has_stack. */
      /* Do not inherit to_has_registers. */
      /* Do not inherit to_has_execution. */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address. */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description. */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory. */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_use_agent, t);
      INHERIT (to_can_use_agent, t);
      INHERIT (to_magic, t);
      INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
      /* Do not inherit to_memory_map. */
      /* Do not inherit to_flash_erase. */
      /* Do not inherit to_flash_done. */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it. Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default. */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is never inherited (see above); it is always the
     stack-walking dispatcher.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is looked up on the stack at call time; make
     sure the squashed vector never carries a stale copy.  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (char *, char *, char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    tcomplain);
  de_fault (to_supports_evaluation_of_breakpoint_conditions,
	    (int (*) (void))
	    return_zero);
  de_fault (to_use_agent,
	    (int (*) (int))
	    tcomplain);
  de_fault (to_can_use_agent,
	    (int (*) (void))
	    return_zero);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target". That way code looking for a non-inherited
     target method can quickly and simply find it. */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
952
953 /* Push a new target type into the stack of the existing target accessors,
954 possibly superseding some of the existing accessors.
955
956 Rather than allow an empty stack, we always have the dummy target at
957 the bottom stratum, so we can call the function vectors without
958 checking them. */
959
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number. If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one. */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by stratum, highest stratum on top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them. */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level. */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level. Close it,
	 and un-hook it from the stack. */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one. */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-squash the target stack into current_target.  */
  update_current_target ();
}
1003
1004 /* Remove a target_ops vector from the stack, wherever it may be.
1005 Return how many times it was removed (0 or 1). */
1006
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack; it must
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target. Note that we assume that a target
     can only occur once in the target stack. */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit. Only open targets should be
     closed. */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target. */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target. Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore. */
  target_close (t, 0);

  return 1;
}
1045
1046 void
1047 pop_target (void)
1048 {
1049 target_close (target_stack, 0); /* Let it clean up. */
1050 if (unpush_target (target_stack) == 1)
1051 return;
1052
1053 fprintf_unfiltered (gdb_stderr,
1054 "pop_target couldn't find target %s\n",
1055 current_target.to_shortname);
1056 internal_error (__FILE__, __LINE__,
1057 _("failed internal consistency check"));
1058 }
1059
1060 void
1061 pop_all_targets_above (enum strata above_stratum, int quitting)
1062 {
1063 while ((int) (current_target.to_stratum) > (int) above_stratum)
1064 {
1065 target_close (target_stack, quitting);
1066 if (!unpush_target (target_stack))
1067 {
1068 fprintf_unfiltered (gdb_stderr,
1069 "pop_all_targets couldn't find target %s\n",
1070 target_stack->to_shortname);
1071 internal_error (__FILE__, __LINE__,
1072 _("failed internal consistency check"));
1073 break;
1074 }
1075 }
1076 }
1077
1078 void
1079 pop_all_targets (int quitting)
1080 {
1081 pop_all_targets_above (dummy_stratum, quitting);
1082 }
1083
1084 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1085
1086 int
1087 target_is_pushed (struct target_ops *t)
1088 {
1089 struct target_ops **cur;
1090
1091 /* Check magic number. If wrong, it probably means someone changed
1092 the struct definition, but not all the places that initialize one. */
1093 if (t->to_magic != OPS_MAGIC)
1094 {
1095 fprintf_unfiltered (gdb_stderr,
1096 "Magic number of %s target struct wrong\n",
1097 t->to_shortname);
1098 internal_error (__FILE__, __LINE__,
1099 _("failed internal consistency check"));
1100 }
1101
1102 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1103 if (*cur == t)
1104 return 1;
1105
1106 return 0;
1107 }
1108
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws a
   suitable error if the target or architecture cannot resolve TLS.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile because it is assigned inside TRY_CATCH and read
     after it; without volatile the value could be lost across the
     setjmp/longjmp that TRY_CATCH uses.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target in the stack that knows how to resolve
     thread-local addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  /* TLS resolution needs both a capable target and an architecture
     method that maps OBJFILE to its load module address.  */
  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1205
#undef	MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, origlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  The buffer doubles whenever it fills up.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  /* NOTE(review): ORIGLEN is assigned but never used below.  */
  origlen = len;

  while (len > 0)
    {
      /* Read a 4-byte, 4-aligned chunk; TLEN is how many of its bytes
	 belong to the string, OFFSET where they start within BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the output buffer if this chunk would not fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping at the terminating NUL if found.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* The buffer is returned to the caller even on error; the caller
     frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1283
1284 struct target_section_table *
1285 target_get_section_table (struct target_ops *target)
1286 {
1287 struct target_ops *t;
1288
1289 if (targetdebug)
1290 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1291
1292 for (t = target; t != NULL; t = t->beneath)
1293 if (t->to_get_section_table != NULL)
1294 return (*t->to_get_section_table) (t);
1295
1296 return NULL;
1297 }
1298
1299 /* Find a section containing ADDR. */
1300
1301 struct target_section *
1302 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1303 {
1304 struct target_section_table *table = target_get_section_table (target);
1305 struct target_section *secp;
1306
1307 if (table == NULL)
1308 return NULL;
1309
1310 for (secp = table->sections; secp < table->sections_end; secp++)
1311 {
1312 if (addr >= secp->addr && addr < secp->endaddr)
1313 return secp;
1314 }
1315 return NULL;
1316 }
1317
1318 /* Read memory from the live target, even if currently inspecting a
1319 traceframe. The return is the same as that of target_read. */
1320
1321 static LONGEST
1322 target_read_live_memory (enum target_object object,
1323 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1324 {
1325 int ret;
1326 struct cleanup *cleanup;
1327
1328 /* Switch momentarily out of tfind mode so to access live memory.
1329 Note that this must not clear global state, such as the frame
1330 cache, which must still remain valid for the previous traceframe.
1331 We may be _building_ the frame cache at this point. */
1332 cleanup = make_cleanup_restore_traceframe_number ();
1333 set_traceframe_number (-1);
1334
1335 ret = target_read (current_target.beneath, object, NULL,
1336 myaddr, memaddr, len);
1337
1338 do_cleanups (cleanup);
1339 return ret;
1340 }
1341
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only attempt the live read if MEMADDR falls inside a read-only
     section; otherwise report "nothing transferred".  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.
		     The caller retries for the remainder.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	    }
	}
    }

  /* Nothing transferred.  */
  return 0;
}
1396
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   This is the memory-specific transfer worker.  It tries, in order:
   unmapped overlay sections, trusted read-only executable sections,
   live read-only memory when inspecting a traceframe, the dcache,
   and finally each target on the stack in turn.  */

static LONGEST
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       void *readbuf, const void *writebuf, ULONGEST memaddr,
		       LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clip the request to end where available memory
		     begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr, len);
	      if (res > 0)
		return res;

	      /* No use trying further, we know some memory starting
		 at MEMADDR isn't available.  */
	      return -1;
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  Compute how much of the request
     lies within MEMADDR's memory region.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	return res;
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1607
1608 /* Perform a partial memory transfer. For docs see target.h,
1609 to_xfer_partial. */
1610
1611 static LONGEST
1612 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1613 void *readbuf, const void *writebuf, ULONGEST memaddr,
1614 LONGEST len)
1615 {
1616 int res;
1617
1618 /* Zero length requests are ok and require no work. */
1619 if (len == 0)
1620 return 0;
1621
1622 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1623 breakpoint insns, thus hiding out from higher layers whether
1624 there are software breakpoints inserted in the code stream. */
1625 if (readbuf != NULL)
1626 {
1627 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1628
1629 if (res > 0 && !show_memory_breakpoints)
1630 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1631 }
1632 else
1633 {
1634 void *buf;
1635 struct cleanup *old_chain;
1636
1637 buf = xmalloc (len);
1638 old_chain = make_cleanup (xfree, buf);
1639 memcpy (buf, writebuf, len);
1640
1641 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1642 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1643
1644 do_cleanups (old_chain);
1645 }
1646
1647 return res;
1648 }
1649
1650 static void
1651 restore_show_memory_breakpoints (void *arg)
1652 {
1653 show_memory_breakpoints = (uintptr_t) arg;
1654 }
1655
1656 struct cleanup *
1657 make_show_memory_breakpoints_cleanup (int show)
1658 {
1659 int current = show_memory_breakpoints;
1660
1661 show_memory_breakpoints = show;
1662 return make_cleanup (restore_show_memory_breakpoints,
1663 (void *) (uintptr_t) current);
1664 }
1665
/* For docs see target.h, to_xfer_partial.

   Central dispatch for all partial transfers: enforces the
   "may-write-memory" setting, routes memory objects through the
   memory-specific path, and emits a hex dump of the transferred bytes
   when target debugging is enabled.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Refuse writes when the user has disabled them.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      /* Dump the transferred bytes: the read data, or the data that
	 was written.  */
      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      /* Break the dump into 16-byte lines; at low debug
		 verbosity, stop after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1745
1746 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1747 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1748 if any error occurs.
1749
1750 If an error occurs, no guarantee is made about the contents of the data at
1751 MYADDR. In particular, the caller should not depend upon partial reads
1752 filling the buffer with good data. There is no way for the caller to know
1753 how much good data might have been transfered anyway. Callers that can
1754 deal with partial reads should call target_read (which will retry until
1755 it makes no progress, and then return how much was transferred). */
1756
1757 int
1758 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1759 {
1760 /* Dispatch to the topmost target, not the flattened current_target.
1761 Memory accesses check target->to_has_(all_)memory, and the
1762 flattened target doesn't inherit those. */
1763 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1764 myaddr, memaddr, len) == len)
1765 return 0;
1766 else
1767 return EIO;
1768 }
1769
1770 /* Like target_read_memory, but specify explicitly that this is a read from
1771 the target's stack. This may trigger different cache behavior. */
1772
1773 int
1774 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1775 {
1776 /* Dispatch to the topmost target, not the flattened current_target.
1777 Memory accesses check target->to_has_(all_)memory, and the
1778 flattened target doesn't inherit those. */
1779
1780 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1781 myaddr, memaddr, len) == len)
1782 return 0;
1783 else
1784 return EIO;
1785 }
1786
1787 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1788 Returns either 0 for success or an errno value if any error occurs.
1789 If an error occurs, no guarantee is made about how much data got written.
1790 Callers that can deal with partial writes should call target_write. */
1791
1792 int
1793 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1794 {
1795 /* Dispatch to the topmost target, not the flattened current_target.
1796 Memory accesses check target->to_has_(all_)memory, and the
1797 flattened target doesn't inherit those. */
1798 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1799 myaddr, memaddr, len) == len)
1800 return 0;
1801 else
1802 return EIO;
1803 }
1804
1805 /* Write LEN bytes from MYADDR to target raw memory at address
1806 MEMADDR. Returns either 0 for success or an errno value if any
1807 error occurs. If an error occurs, no guarantee is made about how
1808 much data got written. Callers that can deal with partial writes
1809 should call target_write. */
1810
1811 int
1812 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1813 {
1814 /* Dispatch to the topmost target, not the flattened current_target.
1815 Memory accesses check target->to_has_(all_)memory, and the
1816 flattened target doesn't inherit those. */
1817 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1818 myaddr, memaddr, len) == len)
1819 return 0;
1820 else
1821 return EIO;
1822 }
1823
/* Fetch the target's memory map.  Returns a sorted, numbered vector
   of non-overlapping regions, or NULL if no target provides a map or
   the map is invalid.  The caller owns the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target in the stack that can supply a memory map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort regions by address so overlap detection is a single pass.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1871
1872 void
1873 target_flash_erase (ULONGEST address, LONGEST length)
1874 {
1875 struct target_ops *t;
1876
1877 for (t = current_target.beneath; t != NULL; t = t->beneath)
1878 if (t->to_flash_erase != NULL)
1879 {
1880 if (targetdebug)
1881 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1882 hex_string (address), phex (length, 0));
1883 t->to_flash_erase (t, address, length);
1884 return;
1885 }
1886
1887 tcomplain ();
1888 }
1889
1890 void
1891 target_flash_done (void)
1892 {
1893 struct target_ops *t;
1894
1895 for (t = current_target.beneath; t != NULL; t = t->beneath)
1896 if (t->to_flash_done != NULL)
1897 {
1898 if (targetdebug)
1899 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1900 t->to_flash_done (t);
1901 return;
1902 }
1903
1904 tcomplain ();
1905 }
1906
/* "show trust-readonly-sections" callback: print the current VALUE of
   the setting to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1915
/* More generic transfers.  */

/* Default to_xfer_partial implementation: for memory objects, fall
   back to the target's deprecated_xfer_memory method if it has one;
   otherwise delegate to the target beneath.  Returns the transfer
   count, 0 at end of object, or -1 on error.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
	{
	  /* Copy WRITEBUF since the deprecated method takes a
	     non-const buffer.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
1959
1960 /* The xfer_partial handler for the topmost target. Unlike the default,
1961 it does not need to handle memory specially; it just passes all
1962 requests down the stack. */
1963
1964 static LONGEST
1965 current_xfer_partial (struct target_ops *ops, enum target_object object,
1966 const char *annex, gdb_byte *readbuf,
1967 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1968 {
1969 if (ops->beneath != NULL)
1970 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1971 readbuf, writebuf, offset, len);
1972 else
1973 return -1;
1974 }
1975
1976 /* Target vector read/write partial wrapper functions. */
1977
1978 static LONGEST
1979 target_read_partial (struct target_ops *ops,
1980 enum target_object object,
1981 const char *annex, gdb_byte *buf,
1982 ULONGEST offset, LONGEST len)
1983 {
1984 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1985 }
1986
1987 static LONGEST
1988 target_write_partial (struct target_ops *ops,
1989 enum target_object object,
1990 const char *annex, const gdb_byte *buf,
1991 ULONGEST offset, LONGEST len)
1992 {
1993 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1994 }
1995
1996 /* Wrappers to perform the full transfer. */
1997
1998 /* For docs on target_read see target.h. */
1999
2000 LONGEST
2001 target_read (struct target_ops *ops,
2002 enum target_object object,
2003 const char *annex, gdb_byte *buf,
2004 ULONGEST offset, LONGEST len)
2005 {
2006 LONGEST xfered = 0;
2007
2008 while (xfered < len)
2009 {
2010 LONGEST xfer = target_read_partial (ops, object, annex,
2011 (gdb_byte *) buf + xfered,
2012 offset + xfered, len - xfered);
2013
2014 /* Call an observer, notifying them of the xfer progress? */
2015 if (xfer == 0)
2016 return xfered;
2017 if (xfer < 0)
2018 return -1;
2019 xfered += xfer;
2020 QUIT;
2021 }
2022 return len;
2023 }
2024
2025 /* Assuming that the entire [begin, end) range of memory cannot be
2026 read, try to read whatever subrange is possible to read.
2027
2028 The function returns, in RESULT, either zero or one memory block.
2029 If there's a readable subrange at the beginning, it is completely
2030 read and returned. Any further readable subrange will not be read.
2031 Otherwise, if there's a readable subrange at the end, it will be
2032 completely read and returned. Any readable subranges before it
2033 (obviously, not starting at the beginning), will be ignored. In
2034 other cases -- either no readable subrange, or readable subrange(s)
2035 that is neither at the beginning, or end, nothing is returned.
2036
2037 The purpose of this function is to handle a read across a boundary
2038 of accessible memory in a case when memory map is not available.
2039 The above restrictions are fine for this case, but will give
2040 incorrect results if the memory is 'patchy'. However, supporting
2041 'patchy' memory would require trying to read every single byte,
2042 and it seems unacceptable solution. Explicit memory map is
2043 recommended for this case -- and target_read_memory_robust will
2044 take care of reading multiple ranges then. */
2045
2046 static void
2047 read_whatever_is_readable (struct target_ops *ops,
2048 ULONGEST begin, ULONGEST end,
2049 VEC(memory_read_result_s) **result)
2050 {
2051 gdb_byte *buf = xmalloc (end - begin);
2052 ULONGEST current_begin = begin;
2053 ULONGEST current_end = end;
2054 int forward;
2055 memory_read_result_s r;
2056
2057 /* If we previously failed to read 1 byte, nothing can be done here. */
2058 if (end - begin <= 1)
2059 {
2060 xfree (buf);
2061 return;
2062 }
2063
2064 /* Check that either first or the last byte is readable, and give up
2065 if not. This heuristic is meant to permit reading accessible memory
2066 at the boundary of accessible region. */
2067 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2068 buf, begin, 1) == 1)
2069 {
2070 forward = 1;
2071 ++current_begin;
2072 }
2073 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2074 buf + (end-begin) - 1, end - 1, 1) == 1)
2075 {
2076 forward = 0;
2077 --current_end;
2078 }
2079 else
2080 {
2081 xfree (buf);
2082 return;
2083 }
2084
2085 /* Loop invariant is that the [current_begin, current_end) was previously
2086 found to be not readable as a whole.
2087
2088 Note loop condition -- if the range has 1 byte, we can't divide the range
2089 so there's no point trying further. */
2090 while (current_end - current_begin > 1)
2091 {
2092 ULONGEST first_half_begin, first_half_end;
2093 ULONGEST second_half_begin, second_half_end;
2094 LONGEST xfer;
2095 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2096
2097 if (forward)
2098 {
2099 first_half_begin = current_begin;
2100 first_half_end = middle;
2101 second_half_begin = middle;
2102 second_half_end = current_end;
2103 }
2104 else
2105 {
2106 first_half_begin = middle;
2107 first_half_end = current_end;
2108 second_half_begin = current_begin;
2109 second_half_end = middle;
2110 }
2111
2112 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2113 buf + (first_half_begin - begin),
2114 first_half_begin,
2115 first_half_end - first_half_begin);
2116
2117 if (xfer == first_half_end - first_half_begin)
2118 {
2119 /* This half reads up fine. So, the error must be in the
2120 other half. */
2121 current_begin = second_half_begin;
2122 current_end = second_half_end;
2123 }
2124 else
2125 {
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only
	     tries to read a single readable subrange.  */
2132 current_begin = first_half_begin;
2133 current_end = first_half_end;
2134 }
2135 }
2136
2137 if (forward)
2138 {
2139 /* The [begin, current_begin) range has been read. */
2140 r.begin = begin;
2141 r.end = current_begin;
2142 r.data = buf;
2143 }
2144 else
2145 {
2146 /* The [current_end, end) range has been read. */
2147 LONGEST rlen = end - current_end;
2148
2149 r.data = xmalloc (rlen);
2150 memcpy (r.data, buf + current_end - begin, rlen);
2151 r.begin = current_end;
2152 r.end = end;
2153 xfree (buf);
2154 }
2155 VEC_safe_push(memory_read_result_s, (*result), &r);
2156 }
2157
2158 void
2159 free_memory_read_result_vector (void *x)
2160 {
2161 VEC(memory_read_result_s) *v = x;
2162 memory_read_result_s *current;
2163 int ix;
2164
2165 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2166 {
2167 xfree (current->data);
2168 }
2169 VEC_free (memory_read_result_s, v);
2170 }
2171
/* Read LEN bytes of target memory starting at OFFSET, reading as much
   as possible.  Returns a vector of memory_read_result_s, one entry
   per readable subrange; the caller owns both the vector and the data
   buffers inside it (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      /* The memory region containing the next address decides whether
	 the next chunk is readable at all.  */
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	/* An upper bound of 0 means the region extends to the end of
	   the address space; take the rest of the request.  */
	rlen = len - xfered;
      else
	/* NOTE(review): this looks like it should subtract
	   OFFSET + XFERED rather than OFFSET alone; as written, the
	   remaining region length is overstated once XFERED > 0 --
	   confirm against lookup_mem_region's contract.  */
	rlen = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success; record the readable range.
		 BUFFER ownership passes to the vector entry.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
2230
2231
2232 /* An alternative to target_write with progress callbacks. */
2233
2234 LONGEST
2235 target_write_with_progress (struct target_ops *ops,
2236 enum target_object object,
2237 const char *annex, const gdb_byte *buf,
2238 ULONGEST offset, LONGEST len,
2239 void (*progress) (ULONGEST, void *), void *baton)
2240 {
2241 LONGEST xfered = 0;
2242
2243 /* Give the progress callback a chance to set up. */
2244 if (progress)
2245 (*progress) (0, baton);
2246
2247 while (xfered < len)
2248 {
2249 LONGEST xfer = target_write_partial (ops, object, annex,
2250 (gdb_byte *) buf + xfered,
2251 offset + xfered, len - xfered);
2252
2253 if (xfer == 0)
2254 return xfered;
2255 if (xfer < 0)
2256 return -1;
2257
2258 if (progress)
2259 (*progress) (xfer, baton);
2260
2261 xfered += xfer;
2262 QUIT;
2263 }
2264 return len;
2265 }
2266
2267 /* For docs on target_write see target.h. */
2268
2269 LONGEST
2270 target_write (struct target_ops *ops,
2271 enum target_object object,
2272 const char *annex, const gdb_byte *buf,
2273 ULONGEST offset, LONGEST len)
2274 {
2275 return target_write_with_progress (ops, object, annex, buf, offset, len,
2276 NULL, NULL);
2277 }
2278
2279 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2280 the size of the transferred data. PADDING additional bytes are
2281 available in *BUF_P. This is a helper function for
2282 target_read_alloc; see the declaration of that function for more
2283 information. */
2284
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Keep PADDING bytes of headroom at the end of the buffer so
	 the caller (e.g. target_read_stralloc) can append a
	 terminator.  */
      n = target_read_partial (ops, object, annex, &buf[buf_pos],
			       buf_pos, buf_alloc - buf_pos - padding);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  Note that when nothing at all was
	     transferred, *BUF_P is left unset and the buffer is
	     freed.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  Doubling keeps the
	 next read length (buf_alloc - buf_pos - padding) positive.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2337
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */
2341
2342 LONGEST
2343 target_read_alloc (struct target_ops *ops, enum target_object object,
2344 const char *annex, gdb_byte **buf_p)
2345 {
2346 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2347 }
2348
2349 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2350 returned as a string, allocated using xmalloc. If an error occurs
2351 or the transfer is unsupported, NULL is returned. Empty objects
2352 are returned as allocated but empty strings. A warning is issued
2353 if the result contains any embedded NUL bytes. */
2354
2355 char *
2356 target_read_stralloc (struct target_ops *ops, enum target_object object,
2357 const char *annex)
2358 {
2359 gdb_byte *buffer;
2360 LONGEST i, transferred;
2361
2362 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2363
2364 if (transferred < 0)
2365 return NULL;
2366
2367 if (transferred == 0)
2368 return xstrdup ("");
2369
2370 buffer[transferred] = 0;
2371
2372 /* Check for embedded NUL bytes; but allow trailing NULs. */
2373 for (i = strlen (buffer); i < transferred; i++)
2374 if (buffer[i] != 0)
2375 {
2376 warning (_("target object %d, annex %s, "
2377 "contained unexpected null characters"),
2378 (int) object, annex ? annex : "(none)");
2379 break;
2380 }
2381
2382 return (char *) buffer;
2383 }
2384
2385 /* Memory transfer methods. */
2386
2387 void
2388 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2389 LONGEST len)
2390 {
2391 /* This method is used to read from an alternate, non-current
2392 target. This read must bypass the overlay support (as symbols
2393 don't match this target), and GDB's internal cache (wrong cache
2394 for this target). */
2395 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2396 != len)
2397 memory_error (EIO, addr);
2398 }
2399
2400 ULONGEST
2401 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2402 int len, enum bfd_endian byte_order)
2403 {
2404 gdb_byte buf[sizeof (ULONGEST)];
2405
2406 gdb_assert (len <= sizeof (buf));
2407 get_target_memory (ops, addr, buf, len);
2408 return extract_unsigned_integer (buf, len, byte_order);
2409 }
2410
2411 int
2412 target_insert_breakpoint (struct gdbarch *gdbarch,
2413 struct bp_target_info *bp_tgt)
2414 {
2415 if (!may_insert_breakpoints)
2416 {
2417 warning (_("May not insert breakpoints"));
2418 return 1;
2419 }
2420
2421 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2422 }
2423
2424 int
2425 target_remove_breakpoint (struct gdbarch *gdbarch,
2426 struct bp_target_info *bp_tgt)
2427 {
2428 /* This is kind of a weird case to handle, but the permission might
2429 have been changed after breakpoints were inserted - in which case
2430 we should just take the user literally and assume that any
2431 breakpoints should be left in place. */
2432 if (!may_insert_breakpoints)
2433 {
2434 warning (_("May not remove breakpoints"));
2435 return 1;
2436 }
2437
2438 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2439 }
2440
2441 static void
2442 target_info (char *args, int from_tty)
2443 {
2444 struct target_ops *t;
2445 int has_all_mem = 0;
2446
2447 if (symfile_objfile != NULL)
2448 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2449
2450 for (t = target_stack; t != NULL; t = t->beneath)
2451 {
2452 if (!(*t->to_has_memory) (t))
2453 continue;
2454
2455 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2456 continue;
2457 if (has_all_mem)
2458 printf_unfiltered (_("\tWhile running this, "
2459 "GDB does not access memory from...\n"));
2460 printf_unfiltered ("%s:\n", t->to_longname);
2461 (t->to_files_info) (t);
2462 has_all_mem = (*t->to_has_all_memory) (t);
2463 }
2464 }
2465
2466 /* This function is called before any new inferior is created, e.g.
2467 by running a program, attaching, or connecting to a target.
2468 It cleans up any state from previous invocations which might
2469 change between runs. This is a subset of what target_preopen
2470 resets (things which might change between targets). */
2471
2472 void
2473 target_pre_inferior (int from_tty)
2474 {
2475 /* Clear out solib state. Otherwise the solib state of the previous
2476 inferior might have survived and is entirely wrong for the new
2477 target. This has been observed on GNU/Linux using glibc 2.3. How
2478 to reproduce:
2479
2480 bash$ ./foo&
2481 [1] 4711
2482 bash$ ./foo&
2483 [1] 4712
2484 bash$ gdb ./foo
2485 [...]
2486 (gdb) attach 4711
2487 (gdb) detach
2488 (gdb) attach 4712
2489 Cannot access memory at address 0xdeadbeef
2490 */
2491
2492 /* In some OSs, the shared library list is the same/global/shared
2493 across inferiors. If code is shared between processes, so are
2494 memory regions and features. */
2495 if (!gdbarch_has_global_solist (target_gdbarch))
2496 {
2497 no_shared_libraries (NULL, from_tty);
2498
2499 invalidate_target_mem_regions ();
2500
2501 target_clear_description ();
2502 }
2503 }
2504
2505 /* Callback for iterate_over_inferiors. Gets rid of the given
2506 inferior. */
2507
2508 static int
2509 dispose_inferior (struct inferior *inf, void *args)
2510 {
2511 struct thread_info *thread;
2512
2513 thread = any_thread_of_process (inf->pid);
2514 if (thread)
2515 {
2516 switch_to_thread (thread->ptid);
2517
2518 /* Core inferiors actually should be detached, not killed. */
2519 if (target_has_execution)
2520 target_kill ();
2521 else
2522 target_detach (NULL, 0);
2523 }
2524
2525 return 0;
2526 }
2527
2528 /* This is to be called by the open routine before it does
2529 anything. */
2530
2531 void
2532 target_preopen (int from_tty)
2533 {
2534 dont_repeat ();
2535
2536 if (have_inferiors ())
2537 {
2538 if (!from_tty
2539 || !have_live_inferiors ()
2540 || query (_("A program is being debugged already. Kill it? ")))
2541 iterate_over_inferiors (dispose_inferior, NULL);
2542 else
2543 error (_("Program not killed."));
2544 }
2545
2546 /* Calling target_kill may remove the target from the stack. But if
2547 it doesn't (which seems like a win for UDI), remove it now. */
2548 /* Leave the exec target, though. The user may be switching from a
2549 live process to a core of the same program. */
2550 pop_all_targets_above (file_stratum, 0);
2551
2552 target_pre_inferior (from_tty);
2553 }
2554
2555 /* Detach a target after doing deferred register stores. */
2556
2557 void
2558 target_detach (char *args, int from_tty)
2559 {
2560 struct target_ops* t;
2561
2562 if (gdbarch_has_global_breakpoints (target_gdbarch))
2563 /* Don't remove global breakpoints here. They're removed on
2564 disconnection from the target. */
2565 ;
2566 else
2567 /* If we're in breakpoints-always-inserted mode, have to remove
2568 them before detaching. */
2569 remove_breakpoints_pid (PIDGET (inferior_ptid));
2570
2571 prepare_for_detach ();
2572
2573 for (t = current_target.beneath; t != NULL; t = t->beneath)
2574 {
2575 if (t->to_detach != NULL)
2576 {
2577 t->to_detach (t, args, from_tty);
2578 if (targetdebug)
2579 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2580 args, from_tty);
2581 return;
2582 }
2583 }
2584
2585 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2586 }
2587
2588 void
2589 target_disconnect (char *args, int from_tty)
2590 {
2591 struct target_ops *t;
2592
2593 /* If we're in breakpoints-always-inserted mode or if breakpoints
2594 are global across processes, we have to remove them before
2595 disconnecting. */
2596 remove_breakpoints ();
2597
2598 for (t = current_target.beneath; t != NULL; t = t->beneath)
2599 if (t->to_disconnect != NULL)
2600 {
2601 if (targetdebug)
2602 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2603 args, from_tty);
2604 t->to_disconnect (t, args, from_tty);
2605 return;
2606 }
2607
2608 tcomplain ();
2609 }
2610
2611 ptid_t
2612 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2613 {
2614 struct target_ops *t;
2615
2616 for (t = current_target.beneath; t != NULL; t = t->beneath)
2617 {
2618 if (t->to_wait != NULL)
2619 {
2620 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2621
2622 if (targetdebug)
2623 {
2624 char *status_string;
2625
2626 status_string = target_waitstatus_to_string (status);
2627 fprintf_unfiltered (gdb_stdlog,
2628 "target_wait (%d, status) = %d, %s\n",
2629 PIDGET (ptid), PIDGET (retval),
2630 status_string);
2631 xfree (status_string);
2632 }
2633
2634 return retval;
2635 }
2636 }
2637
2638 noprocess ();
2639 }
2640
2641 char *
2642 target_pid_to_str (ptid_t ptid)
2643 {
2644 struct target_ops *t;
2645
2646 for (t = current_target.beneath; t != NULL; t = t->beneath)
2647 {
2648 if (t->to_pid_to_str != NULL)
2649 return (*t->to_pid_to_str) (t, ptid);
2650 }
2651
2652 return normal_pid_to_str (ptid);
2653 }
2654
2655 char *
2656 target_thread_name (struct thread_info *info)
2657 {
2658 struct target_ops *t;
2659
2660 for (t = current_target.beneath; t != NULL; t = t->beneath)
2661 {
2662 if (t->to_thread_name != NULL)
2663 return (*t->to_thread_name) (info);
2664 }
2665
2666 return NULL;
2667 }
2668
2669 void
2670 target_resume (ptid_t ptid, int step, enum target_signal signal)
2671 {
2672 struct target_ops *t;
2673
2674 target_dcache_invalidate ();
2675
2676 for (t = current_target.beneath; t != NULL; t = t->beneath)
2677 {
2678 if (t->to_resume != NULL)
2679 {
2680 t->to_resume (t, ptid, step, signal);
2681 if (targetdebug)
2682 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2683 PIDGET (ptid),
2684 step ? "step" : "continue",
2685 target_signal_to_name (signal));
2686
2687 registers_changed_ptid (ptid);
2688 set_executing (ptid, 1);
2689 set_running (ptid, 1);
2690 clear_inline_frame_state (ptid);
2691 return;
2692 }
2693 }
2694
2695 noprocess ();
2696 }
2697
2698 void
2699 target_pass_signals (int numsigs, unsigned char *pass_signals)
2700 {
2701 struct target_ops *t;
2702
2703 for (t = current_target.beneath; t != NULL; t = t->beneath)
2704 {
2705 if (t->to_pass_signals != NULL)
2706 {
2707 if (targetdebug)
2708 {
2709 int i;
2710
2711 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2712 numsigs);
2713
2714 for (i = 0; i < numsigs; i++)
2715 if (pass_signals[i])
2716 fprintf_unfiltered (gdb_stdlog, " %s",
2717 target_signal_to_name (i));
2718
2719 fprintf_unfiltered (gdb_stdlog, " })\n");
2720 }
2721
2722 (*t->to_pass_signals) (numsigs, pass_signals);
2723 return;
2724 }
2725 }
2726 }
2727
2728 /* Look through the list of possible targets for a target that can
2729 follow forks. */
2730
2731 int
2732 target_follow_fork (int follow_child)
2733 {
2734 struct target_ops *t;
2735
2736 for (t = current_target.beneath; t != NULL; t = t->beneath)
2737 {
2738 if (t->to_follow_fork != NULL)
2739 {
2740 int retval = t->to_follow_fork (t, follow_child);
2741
2742 if (targetdebug)
2743 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2744 follow_child, retval);
2745 return retval;
2746 }
2747 }
2748
2749 /* Some target returned a fork event, but did not know how to follow it. */
2750 internal_error (__FILE__, __LINE__,
2751 _("could not find a target to follow fork"));
2752 }
2753
2754 void
2755 target_mourn_inferior (void)
2756 {
2757 struct target_ops *t;
2758
2759 for (t = current_target.beneath; t != NULL; t = t->beneath)
2760 {
2761 if (t->to_mourn_inferior != NULL)
2762 {
2763 t->to_mourn_inferior (t);
2764 if (targetdebug)
2765 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2766
2767 /* We no longer need to keep handles on any of the object files.
2768 Make sure to release them to avoid unnecessarily locking any
2769 of them while we're not actually debugging. */
2770 bfd_cache_close_all ();
2771
2772 return;
2773 }
2774 }
2775
2776 internal_error (__FILE__, __LINE__,
2777 _("could not find a target to follow mourn inferior"));
2778 }
2779
2780 /* Look for a target which can describe architectural features, starting
2781 from TARGET. If we find one, return its description. */
2782
2783 const struct target_desc *
2784 target_read_description (struct target_ops *target)
2785 {
2786 struct target_ops *t;
2787
2788 for (t = target; t != NULL; t = t->beneath)
2789 if (t->to_read_description != NULL)
2790 {
2791 const struct target_desc *tdesc;
2792
2793 tdesc = t->to_read_description (t);
2794 if (tdesc)
2795 return tdesc;
2796 }
2797
2798 return NULL;
2799 }
2800
2801 /* The default implementation of to_search_memory.
2802 This implements a basic search of memory, reading target memory and
2803 performing the search here (as opposed to performing the search in on the
2804 target side with, for example, gdbserver). */
2805
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The buffer carries PATTERN_LEN - 1 extra bytes so a match that
     straddles two chunks is still found by a single memmem call.
     NOTE(review): assumes chunk_size + pattern_len - 1 does not
     overflow unsigned -- presumably PATTERN_LEN is small; confirm at
     the callers.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so that allocation failure can be
     reported with the specific message below.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back to a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target "
			 "memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2906
2907 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2908 sequence of bytes in PATTERN with length PATTERN_LEN.
2909
2910 The result is 1 if found, 0 if not found, and -1 if there was an error
2911 requiring halting of the search (e.g. memory read error).
2912 If the pattern is found the address is recorded in FOUND_ADDRP. */
2913
2914 int
2915 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2916 const gdb_byte *pattern, ULONGEST pattern_len,
2917 CORE_ADDR *found_addrp)
2918 {
2919 struct target_ops *t;
2920 int found;
2921
2922 /* We don't use INHERIT to set current_target.to_search_memory,
2923 so we have to scan the target stack and handle targetdebug
2924 ourselves. */
2925
2926 if (targetdebug)
2927 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2928 hex_string (start_addr));
2929
2930 for (t = current_target.beneath; t != NULL; t = t->beneath)
2931 if (t->to_search_memory != NULL)
2932 break;
2933
2934 if (t != NULL)
2935 {
2936 found = t->to_search_memory (t, start_addr, search_space_len,
2937 pattern, pattern_len, found_addrp);
2938 }
2939 else
2940 {
2941 /* If a special version of to_search_memory isn't available, use the
2942 simple version. */
2943 found = simple_search_memory (current_target.beneath,
2944 start_addr, search_space_len,
2945 pattern, pattern_len, found_addrp);
2946 }
2947
2948 if (targetdebug)
2949 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2950
2951 return found;
2952 }
2953
2954 /* Look through the currently pushed targets. If none of them will
2955 be able to restart the currently running process, issue an error
2956 message. */
2957
2958 void
2959 target_require_runnable (void)
2960 {
2961 struct target_ops *t;
2962
2963 for (t = target_stack; t != NULL; t = t->beneath)
2964 {
2965 /* If this target knows how to create a new program, then
2966 assume we will still be able to after killing the current
2967 one. Either killing and mourning will not pop T, or else
2968 find_default_run_target will find it again. */
2969 if (t->to_create_inferior != NULL)
2970 return;
2971
2972 /* Do not worry about thread_stratum targets that can not
2973 create inferiors. Assume they will be pushed again if
2974 necessary, and continue to the process_stratum. */
2975 if (t->to_stratum == thread_stratum
2976 || t->to_stratum == arch_stratum)
2977 continue;
2978
2979 error (_("The \"%s\" target does not support \"run\". "
2980 "Try \"help target\" or \"continue\"."),
2981 t->to_shortname);
2982 }
2983
2984 /* This function is only called if the target is running. In that
2985 case there should have been a process_stratum target and it
2986 should either know how to create inferiors, or not... */
2987 internal_error (__FILE__, __LINE__, _("No targets found"));
2988 }
2989
2990 /* Look through the list of possible targets for a target that can
2991 execute a run or attach command without any other data. This is
2992 used to locate the default process stratum.
2993
2994 If DO_MESG is not NULL, the result is always valid (error() is
2995 called for errors); else, return NULL on error. */
2996
2997 static struct target_ops *
2998 find_default_run_target (char *do_mesg)
2999 {
3000 struct target_ops **t;
3001 struct target_ops *runable = NULL;
3002 int count;
3003
3004 count = 0;
3005
3006 for (t = target_structs; t < target_structs + target_struct_size;
3007 ++t)
3008 {
3009 if ((*t)->to_can_run && target_can_run (*t))
3010 {
3011 runable = *t;
3012 ++count;
3013 }
3014 }
3015
3016 if (count != 1)
3017 {
3018 if (do_mesg)
3019 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3020 else
3021 return NULL;
3022 }
3023
3024 return runable;
3025 }
3026
3027 void
3028 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3029 {
3030 struct target_ops *t;
3031
3032 t = find_default_run_target ("attach");
3033 (t->to_attach) (t, args, from_tty);
3034 return;
3035 }
3036
3037 void
3038 find_default_create_inferior (struct target_ops *ops,
3039 char *exec_file, char *allargs, char **env,
3040 int from_tty)
3041 {
3042 struct target_ops *t;
3043
3044 t = find_default_run_target ("run");
3045 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3046 return;
3047 }
3048
3049 static int
3050 find_default_can_async_p (void)
3051 {
3052 struct target_ops *t;
3053
3054 /* This may be called before the target is pushed on the stack;
3055 look for the default process stratum. If there's none, gdb isn't
3056 configured with a native debugger, and target remote isn't
3057 connected yet. */
3058 t = find_default_run_target (NULL);
3059 if (t && t->to_can_async_p)
3060 return (t->to_can_async_p) ();
3061 return 0;
3062 }
3063
3064 static int
3065 find_default_is_async_p (void)
3066 {
3067 struct target_ops *t;
3068
3069 /* This may be called before the target is pushed on the stack;
3070 look for the default process stratum. If there's none, gdb isn't
3071 configured with a native debugger, and target remote isn't
3072 connected yet. */
3073 t = find_default_run_target (NULL);
3074 if (t && t->to_is_async_p)
3075 return (t->to_is_async_p) ();
3076 return 0;
3077 }
3078
3079 static int
3080 find_default_supports_non_stop (void)
3081 {
3082 struct target_ops *t;
3083
3084 t = find_default_run_target (NULL);
3085 if (t && t->to_supports_non_stop)
3086 return (t->to_supports_non_stop) ();
3087 return 0;
3088 }
3089
3090 int
3091 target_supports_non_stop (void)
3092 {
3093 struct target_ops *t;
3094
3095 for (t = &current_target; t != NULL; t = t->beneath)
3096 if (t->to_supports_non_stop)
3097 return t->to_supports_non_stop ();
3098
3099 return 0;
3100 }
3101
3102 /* Implement the "info proc" command. */
3103
3104 void
3105 target_info_proc (char *args, enum info_proc_what what)
3106 {
3107 struct target_ops *t;
3108
3109 /* If we're already connected to something that can get us OS
3110 related data, use it. Otherwise, try using the native
3111 target. */
3112 if (current_target.to_stratum >= process_stratum)
3113 t = current_target.beneath;
3114 else
3115 t = find_default_run_target (NULL);
3116
3117 for (; t != NULL; t = t->beneath)
3118 {
3119 if (t->to_info_proc != NULL)
3120 {
3121 t->to_info_proc (t, args, what);
3122
3123 if (targetdebug)
3124 fprintf_unfiltered (gdb_stdlog,
3125 "target_info_proc (\"%s\", %d)\n", args, what);
3126
3127 return;
3128 }
3129 }
3130
3131 error (_("Not supported on this target."));
3132 }
3133
3134 static int
3135 find_default_supports_disable_randomization (void)
3136 {
3137 struct target_ops *t;
3138
3139 t = find_default_run_target (NULL);
3140 if (t && t->to_supports_disable_randomization)
3141 return (t->to_supports_disable_randomization) ();
3142 return 0;
3143 }
3144
3145 int
3146 target_supports_disable_randomization (void)
3147 {
3148 struct target_ops *t;
3149
3150 for (t = &current_target; t != NULL; t = t->beneath)
3151 if (t->to_supports_disable_randomization)
3152 return t->to_supports_disable_randomization ();
3153
3154 return 0;
3155 }
3156
3157 char *
3158 target_get_osdata (const char *type)
3159 {
3160 struct target_ops *t;
3161
3162 /* If we're already connected to something that can get us OS
3163 related data, use it. Otherwise, try using the native
3164 target. */
3165 if (current_target.to_stratum >= process_stratum)
3166 t = current_target.beneath;
3167 else
3168 t = find_default_run_target ("get OS data");
3169
3170 if (!t)
3171 return NULL;
3172
3173 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3174 }
3175
3176 /* Determine the current address space of thread PTID. */
3177
3178 struct address_space *
3179 target_thread_address_space (ptid_t ptid)
3180 {
3181 struct address_space *aspace;
3182 struct inferior *inf;
3183 struct target_ops *t;
3184
3185 for (t = current_target.beneath; t != NULL; t = t->beneath)
3186 {
3187 if (t->to_thread_address_space != NULL)
3188 {
3189 aspace = t->to_thread_address_space (t, ptid);
3190 gdb_assert (aspace);
3191
3192 if (targetdebug)
3193 fprintf_unfiltered (gdb_stdlog,
3194 "target_thread_address_space (%s) = %d\n",
3195 target_pid_to_str (ptid),
3196 address_space_num (aspace));
3197 return aspace;
3198 }
3199 }
3200
3201 /* Fall-back to the "main" address space of the inferior. */
3202 inf = find_inferior_pid (ptid_get_pid (ptid));
3203
3204 if (inf == NULL || inf->aspace == NULL)
3205 internal_error (__FILE__, __LINE__,
3206 _("Can't determine the current "
3207 "address space of thread %s\n"),
3208 target_pid_to_str (ptid));
3209
3210 return inf->aspace;
3211 }
3212
3213
3214 /* Target file operations. */
3215
3216 static struct target_ops *
3217 default_fileio_target (void)
3218 {
3219 /* If we're already connected to something that can perform
3220 file I/O, use it. Otherwise, try using the native target. */
3221 if (current_target.to_stratum >= process_stratum)
3222 return current_target.beneath;
3223 else
3224 return find_default_run_target ("file I/O");
3225 }
3226
3227 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3228 target file descriptor, or -1 if an error occurs (and set
3229 *TARGET_ERRNO). */
3230 int
3231 target_fileio_open (const char *filename, int flags, int mode,
3232 int *target_errno)
3233 {
3234 struct target_ops *t;
3235
3236 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3237 {
3238 if (t->to_fileio_open != NULL)
3239 {
3240 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3241
3242 if (targetdebug)
3243 fprintf_unfiltered (gdb_stdlog,
3244 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3245 filename, flags, mode,
3246 fd, fd != -1 ? 0 : *target_errno);
3247 return fd;
3248 }
3249 }
3250
3251 *target_errno = FILEIO_ENOSYS;
3252 return -1;
3253 }
3254
3255 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3256 Return the number of bytes written, or -1 if an error occurs
3257 (and set *TARGET_ERRNO). */
3258 int
3259 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3260 ULONGEST offset, int *target_errno)
3261 {
3262 struct target_ops *t;
3263
3264 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3265 {
3266 if (t->to_fileio_pwrite != NULL)
3267 {
3268 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3269 target_errno);
3270
3271 if (targetdebug)
3272 fprintf_unfiltered (gdb_stdlog,
3273 "target_fileio_pwrite (%d,...,%d,%s) "
3274 "= %d (%d)\n",
3275 fd, len, pulongest (offset),
3276 ret, ret != -1 ? 0 : *target_errno);
3277 return ret;
3278 }
3279 }
3280
3281 *target_errno = FILEIO_ENOSYS;
3282 return -1;
3283 }
3284
3285 /* Read up to LEN bytes FD on the target into READ_BUF.
3286 Return the number of bytes read, or -1 if an error occurs
3287 (and set *TARGET_ERRNO). */
3288 int
3289 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3290 ULONGEST offset, int *target_errno)
3291 {
3292 struct target_ops *t;
3293
3294 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3295 {
3296 if (t->to_fileio_pread != NULL)
3297 {
3298 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3299 target_errno);
3300
3301 if (targetdebug)
3302 fprintf_unfiltered (gdb_stdlog,
3303 "target_fileio_pread (%d,...,%d,%s) "
3304 "= %d (%d)\n",
3305 fd, len, pulongest (offset),
3306 ret, ret != -1 ? 0 : *target_errno);
3307 return ret;
3308 }
3309 }
3310
3311 *target_errno = FILEIO_ENOSYS;
3312 return -1;
3313 }
3314
3315 /* Close FD on the target. Return 0, or -1 if an error occurs
3316 (and set *TARGET_ERRNO). */
3317 int
3318 target_fileio_close (int fd, int *target_errno)
3319 {
3320 struct target_ops *t;
3321
3322 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3323 {
3324 if (t->to_fileio_close != NULL)
3325 {
3326 int ret = t->to_fileio_close (fd, target_errno);
3327
3328 if (targetdebug)
3329 fprintf_unfiltered (gdb_stdlog,
3330 "target_fileio_close (%d) = %d (%d)\n",
3331 fd, ret, ret != -1 ? 0 : *target_errno);
3332 return ret;
3333 }
3334 }
3335
3336 *target_errno = FILEIO_ENOSYS;
3337 return -1;
3338 }
3339
3340 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3341 occurs (and set *TARGET_ERRNO). */
3342 int
3343 target_fileio_unlink (const char *filename, int *target_errno)
3344 {
3345 struct target_ops *t;
3346
3347 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3348 {
3349 if (t->to_fileio_unlink != NULL)
3350 {
3351 int ret = t->to_fileio_unlink (filename, target_errno);
3352
3353 if (targetdebug)
3354 fprintf_unfiltered (gdb_stdlog,
3355 "target_fileio_unlink (%s) = %d (%d)\n",
3356 filename, ret, ret != -1 ? 0 : *target_errno);
3357 return ret;
3358 }
3359 }
3360
3361 *target_errno = FILEIO_ENOSYS;
3362 return -1;
3363 }
3364
3365 /* Read value of symbolic link FILENAME on the target. Return a
3366 null-terminated string allocated via xmalloc, or NULL if an error
3367 occurs (and set *TARGET_ERRNO). */
3368 char *
3369 target_fileio_readlink (const char *filename, int *target_errno)
3370 {
3371 struct target_ops *t;
3372
3373 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3374 {
3375 if (t->to_fileio_readlink != NULL)
3376 {
3377 char *ret = t->to_fileio_readlink (filename, target_errno);
3378
3379 if (targetdebug)
3380 fprintf_unfiltered (gdb_stdlog,
3381 "target_fileio_readlink (%s) = %s (%d)\n",
3382 filename, ret? ret : "(nil)",
3383 ret? 0 : *target_errno);
3384 return ret;
3385 }
3386 }
3387
3388 *target_errno = FILEIO_ENOSYS;
3389 return NULL;
3390 }
3391
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE.  Any close error is deliberately ignored, since this runs
   on error/unwind paths where there is nothing useful to do with it.  */
static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3400
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  Returns -1 on error; on success the caller owns *BUF_P
   (unless the file was empty, in which case *BUF_P is not set).  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Leave PADDING bytes free at the end of the buffer so the
	 caller can append (e.g. a NUL terminator).  */
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3464
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   function for more information about the return value.  Thin wrapper
   that requests no extra padding bytes.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3474
3475 /* Read target file FILENAME. The result is NUL-terminated and
3476 returned as a string, allocated using xmalloc. If an error occurs
3477 or the transfer is unsupported, NULL is returned. Empty objects
3478 are returned as allocated but empty strings. A warning is issued
3479 if the result contains any embedded NUL bytes. */
3480
3481 char *
3482 target_fileio_read_stralloc (const char *filename)
3483 {
3484 gdb_byte *buffer;
3485 LONGEST i, transferred;
3486
3487 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3488
3489 if (transferred < 0)
3490 return NULL;
3491
3492 if (transferred == 0)
3493 return xstrdup ("");
3494
3495 buffer[transferred] = 0;
3496
3497 /* Check for embedded NUL bytes; but allow trailing NULs. */
3498 for (i = strlen (buffer); i < transferred; i++)
3499 if (buffer[i] != 0)
3500 {
3501 warning (_("target file %s "
3502 "contained unexpected null characters"),
3503 filename);
3504 break;
3505 }
3506
3507 return (char *) buffer;
3508 }
3509
3510
3511 static int
3512 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3513 {
3514 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
3515 }
3516
3517 static int
3518 default_watchpoint_addr_within_range (struct target_ops *target,
3519 CORE_ADDR addr,
3520 CORE_ADDR start, int length)
3521 {
3522 return addr >= start && addr < start + length;
3523 }
3524
/* Default to_thread_architecture implementation: every thread uses
   the inferior's main architecture.  */
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch;
}
3530
/* Stub method that always returns 0; cast into various target vector
   slots by init_dummy_target and friends.  */
static int
return_zero (void)
{
  return 0;
}
3536
/* Stub method that always returns 1, for target vector slots whose
   "trivially true" answer is 1.  */
static int
return_one (void)
{
  return 1;
}
3542
/* Stub method that always returns -1, for target vector slots whose
   "unsupported" answer is -1.  */
static int
return_minus_one (void)
{
  return -1;
}
3548
3549 /* Find a single runnable target in the stack and return it. If for
3550 some reason there is more than one, return NULL. */
3551
3552 struct target_ops *
3553 find_run_target (void)
3554 {
3555 struct target_ops **t;
3556 struct target_ops *runable = NULL;
3557 int count;
3558
3559 count = 0;
3560
3561 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3562 {
3563 if ((*t)->to_can_run && target_can_run (*t))
3564 {
3565 runable = *t;
3566 ++count;
3567 }
3568 }
3569
3570 return (count == 1 ? runable : NULL);
3571 }
3572
/* Find the next target down the stack from the specified target T.
   May return NULL if T is the bottom of the stack.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3582
3583 \f
/* The inferior process has died.  Long live the inferior!  Generic
   teardown shared by targets: clears inferior_ptid, removes the dead
   inferior and its breakpoint/register/frame state.  The statement
   order below is significant; see the inline comments.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Save and clear inferior_ptid first, so the teardown below runs
     with no "current thread" selected.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3618 \f
3619 /* Helper function for child_wait and the derivatives of child_wait.
3620 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3621 translation of that in OURSTATUS. */
3622 void
3623 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3624 {
3625 if (WIFEXITED (hoststatus))
3626 {
3627 ourstatus->kind = TARGET_WAITKIND_EXITED;
3628 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3629 }
3630 else if (!WIFSTOPPED (hoststatus))
3631 {
3632 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3633 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3634 }
3635 else
3636 {
3637 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3638 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3639 }
3640 }
3641 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  NOTE: not reentrant — each call overwrites the
   previous result.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3653
/* to_pid_to_str for the dummy target: fall back to the generic
   "process NNN" formatting.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3659
/* Error-catcher for target_find_memory_regions.  Always errors; the
   return statement only pacifies the compiler.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3667
/* Error-catcher for target_make_corefile_notes.  Always errors; the
   return statement only pacifies the compiler.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3675
/* Error-catcher for target_get_bookmark.  tcomplain does not return;
   the return statement only pacifies the compiler.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}
3683
/* Error-catcher for target_goto_bookmark.  Always complains; never
   returns normally.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3690
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the very bottom of the target
   stack; most of its methods either delegate to the "find default"
   helpers or report "no" / "unsupported" via the return_* stubs.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  /* Detaching from nothing is a harmless no-op.  */
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* The casts below adapt the parameterless return_zero stub to each
     slot's expected signature; the stubs ignore their arguments.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3727 \f
/* Debug wrapper for to_open: forward to the real target, then log the
   call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3735
/* Close the target TARG.  Prefer the extended close hook (which also
   receives the target itself) over the plain one; a target may define
   either.  */
void
target_close (struct target_ops *targ, int quitting)
{
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ, quitting);
  else if (targ->to_close != NULL)
    targ->to_close (quitting);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
}
3747
3748 void
3749 target_attach (char *args, int from_tty)
3750 {
3751 struct target_ops *t;
3752
3753 for (t = current_target.beneath; t != NULL; t = t->beneath)
3754 {
3755 if (t->to_attach != NULL)
3756 {
3757 t->to_attach (t, args, from_tty);
3758 if (targetdebug)
3759 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3760 args, from_tty);
3761 return;
3762 }
3763 }
3764
3765 internal_error (__FILE__, __LINE__,
3766 _("could not find a target to attach"));
3767 }
3768
3769 int
3770 target_thread_alive (ptid_t ptid)
3771 {
3772 struct target_ops *t;
3773
3774 for (t = current_target.beneath; t != NULL; t = t->beneath)
3775 {
3776 if (t->to_thread_alive != NULL)
3777 {
3778 int retval;
3779
3780 retval = t->to_thread_alive (t, ptid);
3781 if (targetdebug)
3782 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3783 PIDGET (ptid), retval);
3784
3785 return retval;
3786 }
3787 }
3788
3789 return 0;
3790 }
3791
3792 void
3793 target_find_new_threads (void)
3794 {
3795 struct target_ops *t;
3796
3797 for (t = current_target.beneath; t != NULL; t = t->beneath)
3798 {
3799 if (t->to_find_new_threads != NULL)
3800 {
3801 t->to_find_new_threads (t);
3802 if (targetdebug)
3803 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3804
3805 return;
3806 }
3807 }
3808 }
3809
/* Ask the target to stop PTID.  Honors the "may-stop" user setting:
   if stopping is disallowed, warn and do nothing.  */
void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (ptid);
}
3821
/* Debug wrapper for to_post_attach: forward, then log the call.  */
static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3829
/* Return a pretty printed form of target_waitstatus.
   Space for the result is malloc'd, caller must free.  Every branch
   allocates via xstrprintf, so the result is always non-NULL.  */

char *
target_waitstatus_to_string (const struct target_waitstatus *ws)
{
  const char *kind_str = "status->kind = ";

  switch (ws->kind)
    {
    case TARGET_WAITKIND_EXITED:
      return xstrprintf ("%sexited, status = %d",
			 kind_str, ws->value.integer);
    case TARGET_WAITKIND_STOPPED:
      return xstrprintf ("%sstopped, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_SIGNALLED:
      return xstrprintf ("%ssignalled, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_LOADED:
      return xstrprintf ("%sloaded", kind_str);
    case TARGET_WAITKIND_FORKED:
      return xstrprintf ("%sforked", kind_str);
    case TARGET_WAITKIND_VFORKED:
      return xstrprintf ("%svforked", kind_str);
    case TARGET_WAITKIND_EXECD:
      return xstrprintf ("%sexecd", kind_str);
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      return xstrprintf ("%sentered syscall", kind_str);
    case TARGET_WAITKIND_SYSCALL_RETURN:
      return xstrprintf ("%sexited syscall", kind_str);
    case TARGET_WAITKIND_SPURIOUS:
      return xstrprintf ("%sspurious", kind_str);
    case TARGET_WAITKIND_IGNORE:
      return xstrprintf ("%signore", kind_str);
    case TARGET_WAITKIND_NO_HISTORY:
      return xstrprintf ("%sno-history", kind_str);
    case TARGET_WAITKIND_NO_RESUMED:
      return xstrprintf ("%sno-resumed", kind_str);
    default:
      return xstrprintf ("%sunknown???", kind_str);
    }
}
3873
/* Log one register access to gdb_stdlog on behalf of FUNC (the name
   of the calling wrapper).  Prints the register's name (or number if
   it has no name), its raw bytes, and — if it fits in a LONGEST —
   its value in hex and decimal.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* Registers wider than a LONGEST are shown as raw bytes only.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3910
3911 void
3912 target_fetch_registers (struct regcache *regcache, int regno)
3913 {
3914 struct target_ops *t;
3915
3916 for (t = current_target.beneath; t != NULL; t = t->beneath)
3917 {
3918 if (t->to_fetch_registers != NULL)
3919 {
3920 t->to_fetch_registers (t, regcache, regno);
3921 if (targetdebug)
3922 debug_print_register ("target_fetch_registers", regcache, regno);
3923 return;
3924 }
3925 }
3926 }
3927
3928 void
3929 target_store_registers (struct regcache *regcache, int regno)
3930 {
3931 struct target_ops *t;
3932
3933 if (!may_write_registers)
3934 error (_("Writing to registers is not allowed (regno %d)"), regno);
3935
3936 for (t = current_target.beneath; t != NULL; t = t->beneath)
3937 {
3938 if (t->to_store_registers != NULL)
3939 {
3940 t->to_store_registers (t, regcache, regno);
3941 if (targetdebug)
3942 {
3943 debug_print_register ("target_store_registers", regcache, regno);
3944 }
3945 return;
3946 }
3947 }
3948
3949 noprocess ();
3950 }
3951
3952 int
3953 target_core_of_thread (ptid_t ptid)
3954 {
3955 struct target_ops *t;
3956
3957 for (t = current_target.beneath; t != NULL; t = t->beneath)
3958 {
3959 if (t->to_core_of_thread != NULL)
3960 {
3961 int retval = t->to_core_of_thread (t, ptid);
3962
3963 if (targetdebug)
3964 fprintf_unfiltered (gdb_stdlog,
3965 "target_core_of_thread (%d) = %d\n",
3966 PIDGET (ptid), retval);
3967 return retval;
3968 }
3969 }
3970
3971 return -1;
3972 }
3973
3974 int
3975 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3976 {
3977 struct target_ops *t;
3978
3979 for (t = current_target.beneath; t != NULL; t = t->beneath)
3980 {
3981 if (t->to_verify_memory != NULL)
3982 {
3983 int retval = t->to_verify_memory (t, data, memaddr, size);
3984
3985 if (targetdebug)
3986 fprintf_unfiltered (gdb_stdlog,
3987 "target_verify_memory (%s, %s) = %d\n",
3988 paddress (target_gdbarch, memaddr),
3989 pulongest (size),
3990 retval);
3991 return retval;
3992 }
3993 }
3994
3995 tcomplain ();
3996 }
3997
3998 /* The documentation for this function is in its prototype declaration in
3999 target.h. */
4000
4001 int
4002 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4003 {
4004 struct target_ops *t;
4005
4006 for (t = current_target.beneath; t != NULL; t = t->beneath)
4007 if (t->to_insert_mask_watchpoint != NULL)
4008 {
4009 int ret;
4010
4011 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4012
4013 if (targetdebug)
4014 fprintf_unfiltered (gdb_stdlog, "\
4015 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4016 core_addr_to_string (addr),
4017 core_addr_to_string (mask), rw, ret);
4018
4019 return ret;
4020 }
4021
4022 return 1;
4023 }
4024
4025 /* The documentation for this function is in its prototype declaration in
4026 target.h. */
4027
4028 int
4029 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4030 {
4031 struct target_ops *t;
4032
4033 for (t = current_target.beneath; t != NULL; t = t->beneath)
4034 if (t->to_remove_mask_watchpoint != NULL)
4035 {
4036 int ret;
4037
4038 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4039
4040 if (targetdebug)
4041 fprintf_unfiltered (gdb_stdlog, "\
4042 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4043 core_addr_to_string (addr),
4044 core_addr_to_string (mask), rw, ret);
4045
4046 return ret;
4047 }
4048
4049 return 1;
4050 }
4051
4052 /* The documentation for this function is in its prototype declaration
4053 in target.h. */
4054
4055 int
4056 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4057 {
4058 struct target_ops *t;
4059
4060 for (t = current_target.beneath; t != NULL; t = t->beneath)
4061 if (t->to_masked_watch_num_registers != NULL)
4062 return t->to_masked_watch_num_registers (t, addr, mask);
4063
4064 return -1;
4065 }
4066
4067 /* The documentation for this function is in its prototype declaration
4068 in target.h. */
4069
4070 int
4071 target_ranged_break_num_registers (void)
4072 {
4073 struct target_ops *t;
4074
4075 for (t = current_target.beneath; t != NULL; t = t->beneath)
4076 if (t->to_ranged_break_num_registers != NULL)
4077 return t->to_ranged_break_num_registers (t);
4078
4079 return -1;
4080 }
4081
/* Debug wrapper for to_prepare_to_store: forward, then log the call.  */
static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
4089
/* Debug wrapper for deprecated_xfer_memory: forward to the real
   target, then log the transfer and (on success) a hex dump of the
   bytes moved.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the dump into 16-byte lines, keyed off the host
	     address of the buffer.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At low debug verbosity, dump only the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
4130
/* Debug wrapper for to_files_info: forward, then log the call.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
4138
4139 static int
4140 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4141 struct bp_target_info *bp_tgt)
4142 {
4143 int retval;
4144
4145 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4146
4147 fprintf_unfiltered (gdb_stdlog,
4148 "target_insert_breakpoint (%s, xxx) = %ld\n",
4149 core_addr_to_string (bp_tgt->placed_address),
4150 (unsigned long) retval);
4151 return retval;
4152 }
4153
4154 static int
4155 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4156 struct bp_target_info *bp_tgt)
4157 {
4158 int retval;
4159
4160 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4161
4162 fprintf_unfiltered (gdb_stdlog,
4163 "target_remove_breakpoint (%s, xxx) = %ld\n",
4164 core_addr_to_string (bp_tgt->placed_address),
4165 (unsigned long) retval);
4166 return retval;
4167 }
4168
4169 static int
4170 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4171 {
4172 int retval;
4173
4174 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4175
4176 fprintf_unfiltered (gdb_stdlog,
4177 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4178 (unsigned long) type,
4179 (unsigned long) cnt,
4180 (unsigned long) from_tty,
4181 (unsigned long) retval);
4182 return retval;
4183 }
4184
4185 static int
4186 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4187 {
4188 CORE_ADDR retval;
4189
4190 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4191
4192 fprintf_unfiltered (gdb_stdlog,
4193 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4194 core_addr_to_string (addr), (unsigned long) len,
4195 core_addr_to_string (retval));
4196 return retval;
4197 }
4198
4199 static int
4200 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4201 struct expression *cond)
4202 {
4203 int retval;
4204
4205 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4206 rw, cond);
4207
4208 fprintf_unfiltered (gdb_stdlog,
4209 "target_can_accel_watchpoint_condition "
4210 "(%s, %d, %d, %s) = %ld\n",
4211 core_addr_to_string (addr), len, rw,
4212 host_address_to_string (cond), (unsigned long) retval);
4213 return retval;
4214 }
4215
4216 static int
4217 debug_to_stopped_by_watchpoint (void)
4218 {
4219 int retval;
4220
4221 retval = debug_target.to_stopped_by_watchpoint ();
4222
4223 fprintf_unfiltered (gdb_stdlog,
4224 "target_stopped_by_watchpoint () = %ld\n",
4225 (unsigned long) retval);
4226 return retval;
4227 }
4228
4229 static int
4230 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4231 {
4232 int retval;
4233
4234 retval = debug_target.to_stopped_data_address (target, addr);
4235
4236 fprintf_unfiltered (gdb_stdlog,
4237 "target_stopped_data_address ([%s]) = %ld\n",
4238 core_addr_to_string (*addr),
4239 (unsigned long)retval);
4240 return retval;
4241 }
4242
4243 static int
4244 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4245 CORE_ADDR addr,
4246 CORE_ADDR start, int length)
4247 {
4248 int retval;
4249
4250 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4251 start, length);
4252
4253 fprintf_filtered (gdb_stdlog,
4254 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4255 core_addr_to_string (addr), core_addr_to_string (start),
4256 length, retval);
4257 return retval;
4258 }
4259
4260 static int
4261 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4262 struct bp_target_info *bp_tgt)
4263 {
4264 int retval;
4265
4266 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4267
4268 fprintf_unfiltered (gdb_stdlog,
4269 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4270 core_addr_to_string (bp_tgt->placed_address),
4271 (unsigned long) retval);
4272 return retval;
4273 }
4274
4275 static int
4276 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4277 struct bp_target_info *bp_tgt)
4278 {
4279 int retval;
4280
4281 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4282
4283 fprintf_unfiltered (gdb_stdlog,
4284 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4285 core_addr_to_string (bp_tgt->placed_address),
4286 (unsigned long) retval);
4287 return retval;
4288 }
4289
4290 static int
4291 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4292 struct expression *cond)
4293 {
4294 int retval;
4295
4296 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4297
4298 fprintf_unfiltered (gdb_stdlog,
4299 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4300 core_addr_to_string (addr), len, type,
4301 host_address_to_string (cond), (unsigned long) retval);
4302 return retval;
4303 }
4304
4305 static int
4306 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4307 struct expression *cond)
4308 {
4309 int retval;
4310
4311 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4312
4313 fprintf_unfiltered (gdb_stdlog,
4314 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4315 core_addr_to_string (addr), len, type,
4316 host_address_to_string (cond), (unsigned long) retval);
4317 return retval;
4318 }
4319
/* Debug wrapper for to_terminal_init: forward, then log the call.  */
static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
4327
/* Debug wrapper for to_terminal_inferior: forward, then log the call.  */
static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
4335
/* Debug wrapper for to_terminal_ours_for_output: forward, then log
   the call.  */
static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
4343
/* Debug wrapper for to_terminal_ours: forward, then log the call.  */
static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
4351
/* Debug wrapper for to_terminal_save_ours: forward, then log the call.  */
static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4359
/* Debug wrapper for to_terminal_info: forward, then log the call.  */
static void
debug_to_terminal_info (char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4368
/* Debug wrapper for to_load: forward, then log the call.  */
static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4376
/* Debug wrapper for to_post_startup_inferior: forward, then log the
   call.  */
static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      PIDGET (ptid));
}
4385
4386 static int
4387 debug_to_insert_fork_catchpoint (int pid)
4388 {
4389 int retval;
4390
4391 retval = debug_target.to_insert_fork_catchpoint (pid);
4392
4393 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4394 pid, retval);
4395
4396 return retval;
4397 }
4398
4399 static int
4400 debug_to_remove_fork_catchpoint (int pid)
4401 {
4402 int retval;
4403
4404 retval = debug_target.to_remove_fork_catchpoint (pid);
4405
4406 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4407 pid, retval);
4408
4409 return retval;
4410 }
4411
4412 static int
4413 debug_to_insert_vfork_catchpoint (int pid)
4414 {
4415 int retval;
4416
4417 retval = debug_target.to_insert_vfork_catchpoint (pid);
4418
4419 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4420 pid, retval);
4421
4422 return retval;
4423 }
4424
4425 static int
4426 debug_to_remove_vfork_catchpoint (int pid)
4427 {
4428 int retval;
4429
4430 retval = debug_target.to_remove_vfork_catchpoint (pid);
4431
4432 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4433 pid, retval);
4434
4435 return retval;
4436 }
4437
4438 static int
4439 debug_to_insert_exec_catchpoint (int pid)
4440 {
4441 int retval;
4442
4443 retval = debug_target.to_insert_exec_catchpoint (pid);
4444
4445 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4446 pid, retval);
4447
4448 return retval;
4449 }
4450
4451 static int
4452 debug_to_remove_exec_catchpoint (int pid)
4453 {
4454 int retval;
4455
4456 retval = debug_target.to_remove_exec_catchpoint (pid);
4457
4458 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4459 pid, retval);
4460
4461 return retval;
4462 }
4463
4464 static int
4465 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4466 {
4467 int has_exited;
4468
4469 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4470
4471 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4472 pid, wait_status, *exit_status, has_exited);
4473
4474 return has_exited;
4475 }
4476
4477 static int
4478 debug_to_can_run (void)
4479 {
4480 int retval;
4481
4482 retval = debug_target.to_can_run ();
4483
4484 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4485
4486 return retval;
4487 }
4488
4489 static struct gdbarch *
4490 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4491 {
4492 struct gdbarch *retval;
4493
4494 retval = debug_target.to_thread_architecture (ops, ptid);
4495
4496 fprintf_unfiltered (gdb_stdlog,
4497 "target_thread_architecture (%s) = %s [%s]\n",
4498 target_pid_to_str (ptid),
4499 host_address_to_string (retval),
4500 gdbarch_bfd_arch_info (retval)->printable_name);
4501 return retval;
4502 }
4503
4504 static void
4505 debug_to_stop (ptid_t ptid)
4506 {
4507 debug_target.to_stop (ptid);
4508
4509 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4510 target_pid_to_str (ptid));
4511 }
4512
4513 static void
4514 debug_to_rcmd (char *command,
4515 struct ui_file *outbuf)
4516 {
4517 debug_target.to_rcmd (command, outbuf);
4518 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4519 }
4520
4521 static char *
4522 debug_to_pid_to_exec_file (int pid)
4523 {
4524 char *exec_file;
4525
4526 exec_file = debug_target.to_pid_to_exec_file (pid);
4527
4528 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4529 pid, exec_file);
4530
4531 return exec_file;
4532 }
4533
/* Enable tracing of target operations: save a copy of the current
   target vector in debug_target, then replace selected methods of
   current_target with the debug_to_* wrappers above, each of which
   delegates to the saved method and logs the call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Keep the original vector so the wrappers can delegate to the
     real implementations.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4581 \f
4582
/* Help text shared by the "info target" and "info files" commands
   (both registered in initialize_targets).  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4587
4588 static void
4589 do_monitor_command (char *cmd,
4590 int from_tty)
4591 {
4592 if ((current_target.to_rcmd
4593 == (void (*) (char *, struct ui_file *)) tcomplain)
4594 || (current_target.to_rcmd == debug_to_rcmd
4595 && (debug_target.to_rcmd
4596 == (void (*) (char *, struct ui_file *)) tcomplain)))
4597 error (_("\"monitor\" command not supported by this target."));
4598 target_rcmd (cmd, gdb_stdtarg);
4599 }
4600
4601 /* Print the name of each layers of our target stack. */
4602
4603 static void
4604 maintenance_print_target_stack (char *cmd, int from_tty)
4605 {
4606 struct target_ops *t;
4607
4608 printf_filtered (_("The current target stack is:\n"));
4609
4610 for (t = target_stack; t != NULL; t = t->beneath)
4611 {
4612 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4613 }
4614 }
4615
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (the change is
   rejected by set_maintenance_target_async_permitted below).  */
static int target_async_permitted_1 = 0;
4622
4623 static void
4624 set_maintenance_target_async_permitted (char *args, int from_tty,
4625 struct cmd_list_element *c)
4626 {
4627 if (have_live_inferiors ())
4628 {
4629 target_async_permitted_1 = target_async_permitted;
4630 error (_("Cannot change this setting while the inferior is running."));
4631 }
4632
4633 target_async_permitted = target_async_permitted_1;
4634 }
4635
/* Show hook for "show target-async": VALUE is the printable form of
   the current setting.  */

static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in asynchronous mode is %s.\n"),
		    value);
}
4645
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the set hooks below copy these into the real flags
   once the change has been accepted.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4654
/* Make the user-set values match the real values again.  Used to
   undo a rejected "set may-*" command (see set_target_permissions),
   and whenever the real flags change behind the user's back.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4667
/* The one function handles (most of) the permission flags in the same
   way.  Rejects the change if the inferior is executing; otherwise
   commits the user-entered *_1 values to the real flags.  Note that
   may_write_memory is intentionally not handled here -- it has its
   own hook, set_write_memory_permission, below.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert what the "set" command already stored in the *_1
	 temporaries before complaining.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4689
4690 /* Set memory write permission independently of observer mode. */
4691
4692 static void
4693 set_write_memory_permission (char *args, int from_tty,
4694 struct cmd_list_element *c)
4695 {
4696 /* Make the real values match the user-changed values. */
4697 may_write_memory = may_write_memory_1;
4698 update_observer_mode ();
4699 }
4700
4701
/* Module initialization: install the dummy target at the bottom of
   the target stack, register the target-related commands and
   settings, and create the target data cache.  */

void
initialize_targets (void)
{
  /* The dummy target guarantees there is always at least one entry
     on the target stack.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases of each other.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions.  This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* The "may-*" settings below write into the *_1 temporaries; the
     set hooks commit them to the real flags.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);


  target_dcache = dcache_init ();
}
This page took 0.136846 seconds and 4 git commands to generate.