2011-01-11 Michael Snyder <msnyder@vmware.com>
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46 #include "tracepoint.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static int nosymbol (char *, CORE_ADDR *);
58
59 static void tcomplain (void) ATTRIBUTE_NORETURN;
60
61 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
62
63 static int return_zero (void);
64
65 static int return_one (void);
66
67 static int return_minus_one (void);
68
69 void target_ignore (void);
70
71 static void target_command (char *, int);
72
73 static struct target_ops *find_default_run_target (char *);
74
75 static LONGEST default_xfer_partial (struct target_ops *ops,
76 enum target_object object,
77 const char *annex, gdb_byte *readbuf,
78 const gdb_byte *writebuf,
79 ULONGEST offset, LONGEST len);
80
81 static LONGEST current_xfer_partial (struct target_ops *ops,
82 enum target_object object,
83 const char *annex, gdb_byte *readbuf,
84 const gdb_byte *writebuf,
85 ULONGEST offset, LONGEST len);
86
87 static LONGEST target_xfer_partial (struct target_ops *ops,
88 enum target_object object,
89 const char *annex,
90 void *readbuf, const void *writebuf,
91 ULONGEST offset, LONGEST len);
92
93 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
94 ptid_t ptid);
95
96 static void init_dummy_target (void);
97
98 static struct target_ops debug_target;
99
100 static void debug_to_open (char *, int);
101
102 static void debug_to_prepare_to_store (struct regcache *);
103
104 static void debug_to_files_info (struct target_ops *);
105
106 static int debug_to_insert_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_remove_breakpoint (struct gdbarch *,
110 struct bp_target_info *);
111
112 static int debug_to_can_use_hw_breakpoint (int, int, int);
113
114 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
118 struct bp_target_info *);
119
120 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
124 struct expression *);
125
126 static int debug_to_stopped_by_watchpoint (void);
127
128 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
129
130 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
131 CORE_ADDR, CORE_ADDR, int);
132
133 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
134
135 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
136 struct expression *);
137
138 static void debug_to_terminal_init (void);
139
140 static void debug_to_terminal_inferior (void);
141
142 static void debug_to_terminal_ours_for_output (void);
143
144 static void debug_to_terminal_save_ours (void);
145
146 static void debug_to_terminal_ours (void);
147
148 static void debug_to_terminal_info (char *, int);
149
150 static void debug_to_load (char *, int);
151
152 static int debug_to_lookup_symbol (char *, CORE_ADDR *);
153
154 static int debug_to_can_run (void);
155
156 static void debug_to_notice_signals (ptid_t);
157
158 static void debug_to_stop (ptid_t);
159
/* NOTE: cagney/2004-09-29: Many targets reference this variable in
   weird and mysterious ways.  Putting the variable here lets those
   weird and mysterious ways keep building while they are being
   converted to the inferior inheritance structure.  */
164 struct target_ops deprecated_child_ops;
165
166 /* Pointer to array of target architecture structures; the size of the
167 array; the current index into the array; the allocated size of the
168 array. */
169 struct target_ops **target_structs;
170 unsigned target_struct_size;
171 unsigned target_struct_index;
172 unsigned target_struct_allocsize;
173 #define DEFAULT_ALLOCSIZE 10
174
175 /* The initial current target, so that there is always a semi-valid
176 current target. */
177
178 static struct target_ops dummy_target;
179
180 /* Top of target stack. */
181
182 static struct target_ops *target_stack;
183
184 /* The target structure we are currently using to talk to a process
185 or file or whatever "inferior" we have. */
186
187 struct target_ops current_target;
188
189 /* Command list for target. */
190
191 static struct cmd_list_element *targetlist = NULL;
192
193 /* Nonzero if we should trust readonly sections from the
194 executable when reading memory. */
195
196 static int trust_readonly = 0;
197
198 /* Nonzero if we should show true memory content including
199 memory breakpoint inserted by gdb. */
200
201 static int show_memory_breakpoints = 0;
202
203 /* These globals control whether GDB attempts to perform these
204 operations; they are useful for targets that need to prevent
205 inadvertant disruption, such as in non-stop mode. */
206
207 int may_write_registers = 1;
208
209 int may_write_memory = 1;
210
211 int may_insert_breakpoints = 1;
212
213 int may_insert_tracepoints = 1;
214
215 int may_insert_fast_tracepoints = 1;
216
217 int may_stop = 1;
218
219 /* Non-zero if we want to see trace of target level stuff. */
220
221 static int targetdebug = 0;
/* Callback for "show debug target": print the current value of the
   "set debug target" setting.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
228
229 static void setup_target_debug (void);
230
231 /* The option sets this. */
232 static int stack_cache_enabled_p_1 = 1;
233 /* And set_stack_cache_enabled_p updates this.
234 The reason for the separation is so that we don't flush the cache for
235 on->on transitions. */
236 static int stack_cache_enabled_p = 1;
237
238 /* This is called *after* the stack-cache has been set.
239 Flush the cache for off->on and on->off transitions.
240 There's no real need to flush the cache for on->off transitions,
241 except cleanliness. */
242
243 static void
244 set_stack_cache_enabled_p (char *args, int from_tty,
245 struct cmd_list_element *c)
246 {
247 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
248 target_dcache_invalidate ();
249
250 stack_cache_enabled_p = stack_cache_enabled_p_1;
251 }
252
/* Callback for "show stack-cache": report whether stack accesses go
   through the data cache.  */
static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
259
260 /* Cache of memory operations, to speed up remote access. */
261 static DCACHE *target_dcache;
262
263 /* Invalidate the target dcache. */
264
/* Discard all cached memory contents in the target dcache, so the
   next access re-reads from the target.  */
void
target_dcache_invalidate (void)
{
  dcache_invalidate (target_dcache);
}
270
271 /* The user just typed 'target' without the name of a target. */
272
/* Handler for a bare "target" command (no target name given): tell
   the user an argument is required.  */
static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name).  Try `help target'\n",
		  gdb_stdout);
}
279
280 /* Default target_has_* methods for process_stratum targets. */
281
282 int
283 default_child_has_all_memory (struct target_ops *ops)
284 {
285 /* If no inferior selected, then we can't read memory here. */
286 if (ptid_equal (inferior_ptid, null_ptid))
287 return 0;
288
289 return 1;
290 }
291
292 int
293 default_child_has_memory (struct target_ops *ops)
294 {
295 /* If no inferior selected, then we can't read memory here. */
296 if (ptid_equal (inferior_ptid, null_ptid))
297 return 0;
298
299 return 1;
300 }
301
302 int
303 default_child_has_stack (struct target_ops *ops)
304 {
305 /* If no inferior selected, there's no stack. */
306 if (ptid_equal (inferior_ptid, null_ptid))
307 return 0;
308
309 return 1;
310 }
311
312 int
313 default_child_has_registers (struct target_ops *ops)
314 {
315 /* Can't read registers from no inferior. */
316 if (ptid_equal (inferior_ptid, null_ptid))
317 return 0;
318
319 return 1;
320 }
321
322 int
323 default_child_has_execution (struct target_ops *ops)
324 {
325 /* If there's no thread selected, then we can't make it run through
326 hoops. */
327 if (ptid_equal (inferior_ptid, null_ptid))
328 return 0;
329
330 return 1;
331 }
332
333
334 int
335 target_has_all_memory_1 (void)
336 {
337 struct target_ops *t;
338
339 for (t = current_target.beneath; t != NULL; t = t->beneath)
340 if (t->to_has_all_memory (t))
341 return 1;
342
343 return 0;
344 }
345
346 int
347 target_has_memory_1 (void)
348 {
349 struct target_ops *t;
350
351 for (t = current_target.beneath; t != NULL; t = t->beneath)
352 if (t->to_has_memory (t))
353 return 1;
354
355 return 0;
356 }
357
358 int
359 target_has_stack_1 (void)
360 {
361 struct target_ops *t;
362
363 for (t = current_target.beneath; t != NULL; t = t->beneath)
364 if (t->to_has_stack (t))
365 return 1;
366
367 return 0;
368 }
369
370 int
371 target_has_registers_1 (void)
372 {
373 struct target_ops *t;
374
375 for (t = current_target.beneath; t != NULL; t = t->beneath)
376 if (t->to_has_registers (t))
377 return 1;
378
379 return 0;
380 }
381
382 int
383 target_has_execution_1 (void)
384 {
385 struct target_ops *t;
386
387 for (t = current_target.beneath; t != NULL; t = t->beneath)
388 if (t->to_has_execution (t))
389 return 1;
390
391 return 0;
392 }
393
394 /* Add a possible target architecture to the list. */
395
void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *)) return_zero;

  /* Grow the registry of known targets, doubling the allocation when
     it fills up.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command;
     each target then becomes a subcommand named by its shortname.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
443
444 /* Stub functions */
445
/* Deliberate no-op, usable as a default for any void target method
   that has nothing to do.  */
void
target_ignore (void)
{
}
450
/* Kill the inferior: delegate to the first target on the stack that
   implements to_kill.  Errors with "no process" if none does.  */
void
target_kill (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_kill != NULL)
      {
	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");

	t->to_kill (t);
	return;
      }

  noprocess ();
}
468
/* Dispatch a "load" request to the current target, invalidating the
   memory cache first so stale contents are not read back after the
   target's memory has been rewritten.  */
void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
475
/* Start a new inferior running EXEC_FILE with arguments ARGS and
   environment ENV, using the first target on the stack that
   implements to_create_inferior.  It is an internal error if no
   target can.  */
void
target_create_inferior (char *exec_file, char *args,
			char **env, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_create_inferior != NULL)
	{
	  t->to_create_inferior (t, exec_file, args, env, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_create_inferior (%s, %s, xxx, %d)\n",
				exec_file, args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to create inferior"));
}
498
/* Give the inferior ownership of the terminal, unless this is a
   background resume in which case GDB keeps the terminal.  */
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
513
/* Default deprecated_xfer_memory stub for targets with no memory:
   fail every transfer with EIO and report zero bytes handled.  */
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;			/* Can't read/write this location.  */
  return 0;			/* No bytes handled.  */
}
521
/* Error out, naming the current target, for operations the target
   does not support.  Does not return.  */
static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
528
/* Error out for operations that require a live process.  Does not
   return.  */
void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
534
/* Default to_lookup_symbol stub: nonzero return means the symbol does
   not exist in the target environment.  */
static int
nosymbol (char *name, CORE_ADDR *addrp)
{
  return 1;			/* Symbol does not exist in target env.  */
}
540
/* Default to_terminal_info stub: there is no saved terminal state to
   report.  */
static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
546
547 /* A default implementation for the to_get_ada_task_ptid target method.
548
549 This function builds the PTID by using both LWP and TID as part of
550 the PTID lwp and tid elements. The pid used is the pid of the
551 inferior_ptid. */
552
/* A default implementation for the to_get_ada_task_ptid target method.

   Build a ptid from the current inferior's pid plus the given LWP and
   TID elements.  */
static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
558
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */
570
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET into current_target only if current_target
     does not already have it — i.e. the topmost target providing a
     method wins.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      INHERIT (to_lookup_symbol, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      INHERIT (to_notice_signals, t);
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_async_mask, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_magic, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)               \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_lookup_symbol,
	    (int (*) (char *, CORE_ADDR *))
	    nosymbol);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_notice_signals,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is always wired to current_xfer_partial, which
     walks the stack itself; it is never inherited.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_async_mask,
	    (int (*) (int))
	    return_one);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct breakpoint *))
	    tcomplain);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
905
906 /* Push a new target type into the stack of the existing target accessors,
907 possibly superseding some of the existing accessors.
908
909 Rather than allow an empty stack, we always have the dummy target at
910 the bottom stratum, so we can call the function vectors without
911 checking them. */
912
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  The stack is
     kept sorted by stratum, highest first.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the new stack.  */
  update_current_target ();
}
956
957 /* Remove a target_ops vector from the stack, wherever it may be.
958 Return how many times it was removed (0 or 1). */
959
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  if ((*cur) == NULL)
    return 0;			/* Didn't find target_ops, quit now.  */

  /* NOTE: cagney/2003-12-06: In '94 the close call was made
     unconditional by moving it to before the above check that the
     target was in the target stack (something about "Change the way
     pushing and popping of targets work to support target overlays
     and inheritance").  This doesn't make much sense - only open
     targets should be closed.  */
  target_close (t, 0);

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  /* Re-derive current_target from the shrunken stack.  */
  update_current_target ();

  return 1;
}
999
1000 void
1001 pop_target (void)
1002 {
1003 target_close (target_stack, 0); /* Let it clean up. */
1004 if (unpush_target (target_stack) == 1)
1005 return;
1006
1007 fprintf_unfiltered (gdb_stderr,
1008 "pop_target couldn't find target %s\n",
1009 current_target.to_shortname);
1010 internal_error (__FILE__, __LINE__,
1011 _("failed internal consistency check"));
1012 }
1013
/* Pop every target whose stratum is strictly above ABOVE_STRATUM,
   closing each (QUITTING is forwarded to target_close).  */
void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* NOTE(review): unpush_target below also calls target_close
	 (with quitting == 0) on the target it removes, so each popped
	 target ends up closed twice — confirm targets tolerate a
	 repeated close, or fold the QUITTING flag into a single
	 close.  */
      target_close (target_stack, quitting);
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1031
/* Pop every target above the bottom dummy target.  */
void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}
1037
1038 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1039
1040 int
1041 target_is_pushed (struct target_ops *t)
1042 {
1043 struct target_ops **cur;
1044
1045 /* Check magic number. If wrong, it probably means someone changed
1046 the struct definition, but not all the places that initialize one. */
1047 if (t->to_magic != OPS_MAGIC)
1048 {
1049 fprintf_unfiltered (gdb_stderr,
1050 "Magic number of %s target struct wrong\n",
1051 t->to_shortname);
1052 internal_error (__FILE__, __LINE__,
1053 _("failed internal consistency check"));
1054 }
1055
1056 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1057 if (*cur == t)
1058 return 1;
1059
1060 return 0;
1061 }
1062
1063 /* Using the objfile specified in OBJFILE, find the address for the
1064 current thread's thread-local storage with offset OFFSET. */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* volatile: ADDR is written inside TRY_CATCH (setjmp/longjmp based)
     and read after it, so it must not live in a register.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target on the stack that can resolve TLS
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  /* Translate the generic TLS errors into user-facing messages
	     that name the objfile and thread involved.  */
	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; let a higher catcher deal
		 with it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1159
1160 #undef MIN
1161 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1162
1163 /* target_read_string -- read a null terminated string, up to LEN bytes,
1164 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1165 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1166 is responsible for freeing it. Return the number of bytes successfully
1167 read. */
1168
int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, origlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  /* NOTE(review): ORIGLEN is saved but never used afterwards.  */
  origlen = len;

  while (len > 0)
    {
      /* Read in aligned 4-byte chunks; TLEN is how many of those bytes
	 belong to the string, OFFSET where they start within BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer (doubling) if the next chunk would not
	 fit.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping at (and including) the NUL
	 terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* BUFFER is returned even on error; the caller owns and frees it.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1237
1238 struct target_section_table *
1239 target_get_section_table (struct target_ops *target)
1240 {
1241 struct target_ops *t;
1242
1243 if (targetdebug)
1244 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1245
1246 for (t = target; t != NULL; t = t->beneath)
1247 if (t->to_get_section_table != NULL)
1248 return (*t->to_get_section_table) (t);
1249
1250 return NULL;
1251 }
1252
1253 /* Find a section containing ADDR. */
1254
1255 struct target_section *
1256 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1257 {
1258 struct target_section_table *table = target_get_section_table (target);
1259 struct target_section *secp;
1260
1261 if (table == NULL)
1262 return NULL;
1263
1264 for (secp = table->sections; secp < table->sections_end; secp++)
1265 {
1266 if (addr >= secp->addr && addr < secp->endaddr)
1267 return secp;
1268 }
1269 return NULL;
1270 }
1271
1272 /* Perform a partial memory transfer.
1273 For docs see target.h, to_xfer_partial. */
1274
static LONGEST
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     void *readbuf, const void *writebuf, ULONGEST memaddr,
		     LONGEST len)
{
  LONGEST res;
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return 0;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate to the mapped address and satisfy the request
	     from the section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clamp REG_LEN so
     the transfer does not cross the region's upper boundary.
     NOTE(review): if MEMADDR + LEN wraps past zero this test can
     spuriously pass -- confirm callers bound LEN.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
         with reading from a trace buffer, because reading outside of
         the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	{
	  /* Hide software breakpoint instructions from the caller,
	     unless "show-memory-breakpoints" is in effect.  */
	  if (readbuf && !show_memory_breakpoints)
	    breakpoint_restore_shadows (readbuf, memaddr, reg_len);
	  return res;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* As above: mask software breakpoints out of the data read.  */
  if (readbuf && !show_memory_breakpoints)
    breakpoint_restore_shadows (readbuf, memaddr, reg_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1440
1441 static void
1442 restore_show_memory_breakpoints (void *arg)
1443 {
1444 show_memory_breakpoints = (uintptr_t) arg;
1445 }
1446
1447 struct cleanup *
1448 make_show_memory_breakpoints_cleanup (int show)
1449 {
1450 int current = show_memory_breakpoints;
1451
1452 show_memory_breakpoints = show;
1453 return make_cleanup (restore_show_memory_breakpoints,
1454 (void *) (uintptr_t) current);
1455 }
1456
1457 /* For docs see target.h, to_xfer_partial. */
1458
static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Honor the user's "may-write-memory" setting.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  /* With "set debug target" on, dump the call and (optionally, at
     debug level >= 2, in full) the bytes transferred.  */
  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      /* Break the dump at 16-byte-aligned host addresses;
		 below level 2, truncate after the first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1536
1537 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1538 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1539 if any error occurs.
1540
1541 If an error occurs, no guarantee is made about the contents of the data at
1542 MYADDR. In particular, the caller should not depend upon partial reads
1543 filling the buffer with good data. There is no way for the caller to know
   how much good data might have been transferred anyway.  Callers that can
1545 deal with partial reads should call target_read (which will retry until
1546 it makes no progress, and then return how much was transferred). */
1547
1548 int
1549 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1550 {
1551 /* Dispatch to the topmost target, not the flattened current_target.
1552 Memory accesses check target->to_has_(all_)memory, and the
1553 flattened target doesn't inherit those. */
1554 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1555 myaddr, memaddr, len) == len)
1556 return 0;
1557 else
1558 return EIO;
1559 }
1560
1561 /* Like target_read_memory, but specify explicitly that this is a read from
1562 the target's stack. This may trigger different cache behavior. */
1563
1564 int
1565 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1566 {
1567 /* Dispatch to the topmost target, not the flattened current_target.
1568 Memory accesses check target->to_has_(all_)memory, and the
1569 flattened target doesn't inherit those. */
1570
1571 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1572 myaddr, memaddr, len) == len)
1573 return 0;
1574 else
1575 return EIO;
1576 }
1577
1578 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1579 Returns either 0 for success or an errno value if any error occurs.
1580 If an error occurs, no guarantee is made about how much data got written.
1581 Callers that can deal with partial writes should call target_write. */
1582
1583 int
1584 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1585 {
1586 /* Dispatch to the topmost target, not the flattened current_target.
1587 Memory accesses check target->to_has_(all_)memory, and the
1588 flattened target doesn't inherit those. */
1589 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1590 myaddr, memaddr, len) == len)
1591 return 0;
1592 else
1593 return EIO;
1594 }
1595
1596 /* Fetch the target's memory map. */
1597
VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Find the first target on the stack that can supply a memory
     map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by start address so the overlap check below only needs to
     compare neighbors.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  /* An overlapping map is unusable; drop the whole thing.  */
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1643
1644 void
1645 target_flash_erase (ULONGEST address, LONGEST length)
1646 {
1647 struct target_ops *t;
1648
1649 for (t = current_target.beneath; t != NULL; t = t->beneath)
1650 if (t->to_flash_erase != NULL)
1651 {
1652 if (targetdebug)
1653 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1654 hex_string (address), phex (length, 0));
1655 t->to_flash_erase (t, address, length);
1656 return;
1657 }
1658
1659 tcomplain ();
1660 }
1661
1662 void
1663 target_flash_done (void)
1664 {
1665 struct target_ops *t;
1666
1667 for (t = current_target.beneath; t != NULL; t = t->beneath)
1668 if (t->to_flash_done != NULL)
1669 {
1670 if (targetdebug)
1671 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1672 t->to_flash_done (t);
1673 return;
1674 }
1675
1676 tcomplain ();
1677 }
1678
/* "show" callback for the "trust-readonly-sections" setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1687
1688 /* More generic transfers. */
1689
/* Default to_xfer_partial implementation: satisfy memory requests via
   the old deprecated_xfer_memory method when available, otherwise
   delegate to the target beneath.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
	{
	  /* Copy to a scratch buffer, because deprecated_xfer_memory's
	     buffer parameter is not const.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
1731
1732 /* The xfer_partial handler for the topmost target. Unlike the default,
1733 it does not need to handle memory specially; it just passes all
1734 requests down the stack. */
1735
1736 static LONGEST
1737 current_xfer_partial (struct target_ops *ops, enum target_object object,
1738 const char *annex, gdb_byte *readbuf,
1739 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1740 {
1741 if (ops->beneath != NULL)
1742 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1743 readbuf, writebuf, offset, len);
1744 else
1745 return -1;
1746 }
1747
1748 /* Target vector read/write partial wrapper functions. */
1749
/* Issue a partial read of OBJECT/ANNEX into BUF: a target_xfer_partial
   with no write buffer.  */

static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
1758
/* Issue a partial write of OBJECT/ANNEX from BUF: a target_xfer_partial
   with no read buffer.  */

static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
1767
1768 /* Wrappers to perform the full transfer. */
1769
1770 /* For docs on target_read see target.h. */
1771
1772 LONGEST
1773 target_read (struct target_ops *ops,
1774 enum target_object object,
1775 const char *annex, gdb_byte *buf,
1776 ULONGEST offset, LONGEST len)
1777 {
1778 LONGEST xfered = 0;
1779
1780 while (xfered < len)
1781 {
1782 LONGEST xfer = target_read_partial (ops, object, annex,
1783 (gdb_byte *) buf + xfered,
1784 offset + xfered, len - xfered);
1785
1786 /* Call an observer, notifying them of the xfer progress? */
1787 if (xfer == 0)
1788 return xfered;
1789 if (xfer < 0)
1790 return -1;
1791 xfered += xfer;
1792 QUIT;
1793 }
1794 return len;
1795 }
1796
/** Assuming that the entire [begin, end) range of memory cannot be read,
    try to read whatever subrange is possible to read.

    The function stores, in RESULT, either zero or one memory block.
    If there is a readable subrange at the beginning, it is completely
    read and returned.  Any further readable subrange will not be read.
    Otherwise, if there is a readable subrange at the end, it will be
    completely read and returned.  Any readable subranges before it
    (obviously, not starting at the beginning) will be ignored.  In
    other cases -- either no readable subrange, or readable subrange(s)
    that is neither at the beginning nor at the end -- nothing is
    returned.

    The purpose of this function is to handle a read across a boundary
    of accessible memory in a case when a memory map is not available.
    The above restrictions are fine for this case, but will give
    incorrect results if the memory is 'patchy'.  However, supporting
    'patchy' memory would require trying to read every single byte,
    which seems an unacceptable solution.  An explicit memory map is
    recommended for this case -- and target_read_memory_robust will
    take care of reading multiple ranges then.  */
1817
1818 static void
1819 read_whatever_is_readable (struct target_ops *ops,
1820 ULONGEST begin, ULONGEST end,
1821 VEC(memory_read_result_s) **result)
1822 {
1823 gdb_byte *buf = xmalloc (end-begin);
1824 ULONGEST current_begin = begin;
1825 ULONGEST current_end = end;
1826 int forward;
1827 memory_read_result_s r;
1828
1829 /* If we previously failed to read 1 byte, nothing can be done here. */
1830 if (end - begin <= 1)
1831 return;
1832
1833 /* Check that either first or the last byte is readable, and give up
1834 if not. This heuristic is meant to permit reading accessible memory
1835 at the boundary of accessible region. */
1836 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1837 buf, begin, 1) == 1)
1838 {
1839 forward = 1;
1840 ++current_begin;
1841 }
1842 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1843 buf + (end-begin) - 1, end - 1, 1) == 1)
1844 {
1845 forward = 0;
1846 --current_end;
1847 }
1848 else
1849 {
1850 return;
1851 }
1852
1853 /* Loop invariant is that the [current_begin, current_end) was previously
1854 found to be not readable as a whole.
1855
1856 Note loop condition -- if the range has 1 byte, we can't divide the range
1857 so there's no point trying further. */
1858 while (current_end - current_begin > 1)
1859 {
1860 ULONGEST first_half_begin, first_half_end;
1861 ULONGEST second_half_begin, second_half_end;
1862 LONGEST xfer;
1863
1864 ULONGEST middle = current_begin + (current_end - current_begin)/2;
1865 if (forward)
1866 {
1867 first_half_begin = current_begin;
1868 first_half_end = middle;
1869 second_half_begin = middle;
1870 second_half_end = current_end;
1871 }
1872 else
1873 {
1874 first_half_begin = middle;
1875 first_half_end = current_end;
1876 second_half_begin = current_begin;
1877 second_half_end = middle;
1878 }
1879
1880 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1881 buf + (first_half_begin - begin),
1882 first_half_begin,
1883 first_half_end - first_half_begin);
1884
1885 if (xfer == first_half_end - first_half_begin)
1886 {
1887 /* This half reads up fine. So, the error must be in the
1888 other half. */
1889 current_begin = second_half_begin;
1890 current_end = second_half_end;
1891 }
1892 else
1893 {
1894 /* This half is not readable. Because we've tried one byte, we
1895 know some part of this half if actually redable. Go to the next
1896 iteration to divide again and try to read.
1897
1898 We don't handle the other half, because this function only tries
1899 to read a single readable subrange. */
1900 current_begin = first_half_begin;
1901 current_end = first_half_end;
1902 }
1903 }
1904
1905 if (forward)
1906 {
1907 /* The [begin, current_begin) range has been read. */
1908 r.begin = begin;
1909 r.end = current_begin;
1910 r.data = buf;
1911 }
1912 else
1913 {
1914 /* The [current_end, end) range has been read. */
1915 LONGEST rlen = end - current_end;
1916 r.data = xmalloc (rlen);
1917 memcpy (r.data, buf + current_end - begin, rlen);
1918 r.begin = current_end;
1919 r.end = end;
1920 xfree (buf);
1921 }
1922 VEC_safe_push(memory_read_result_s, (*result), &r);
1923 }
1924
1925 void
1926 free_memory_read_result_vector (void *x)
1927 {
1928 VEC(memory_read_result_s) *v = x;
1929 memory_read_result_s *current;
1930 int ix;
1931
1932 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1933 {
1934 xfree (current->data);
1935 }
1936 VEC_free (memory_read_result_s, v);
1937 }
1938
1939 VEC(memory_read_result_s) *
1940 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
1941 {
1942 VEC(memory_read_result_s) *result = 0;
1943
1944 LONGEST xfered = 0;
1945 while (xfered < len)
1946 {
1947 struct mem_region *region = lookup_mem_region (offset + xfered);
1948 LONGEST rlen;
1949
1950 /* If there is no explicit region, a fake one should be created. */
1951 gdb_assert (region);
1952
1953 if (region->hi == 0)
1954 rlen = len - xfered;
1955 else
1956 rlen = region->hi - offset;
1957
1958 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1959 {
1960 /* Cannot read this region. Note that we can end up here only
1961 if the region is explicitly marked inaccessible, or
1962 'inaccessible-by-default' is in effect. */
1963 xfered += rlen;
1964 }
1965 else
1966 {
1967 LONGEST to_read = min (len - xfered, rlen);
1968 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
1969
1970 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1971 (gdb_byte *) buffer,
1972 offset + xfered, to_read);
1973 /* Call an observer, notifying them of the xfer progress? */
1974 if (xfer <= 0)
1975 {
1976 /* Got an error reading full chunk. See if maybe we can read
1977 some subrange. */
1978 xfree (buffer);
1979 read_whatever_is_readable (ops, offset + xfered,
1980 offset + xfered + to_read, &result);
1981 xfered += to_read;
1982 }
1983 else
1984 {
1985 struct memory_read_result r;
1986 r.data = buffer;
1987 r.begin = offset + xfered;
1988 r.end = r.begin + xfer;
1989 VEC_safe_push (memory_read_result_s, result, &r);
1990 xfered += xfer;
1991 }
1992 QUIT;
1993 }
1994 }
1995 return result;
1996 }
1997
1998
1999 /* An alternative to target_write with progress callbacks. */
2000
2001 LONGEST
2002 target_write_with_progress (struct target_ops *ops,
2003 enum target_object object,
2004 const char *annex, const gdb_byte *buf,
2005 ULONGEST offset, LONGEST len,
2006 void (*progress) (ULONGEST, void *), void *baton)
2007 {
2008 LONGEST xfered = 0;
2009
2010 /* Give the progress callback a chance to set up. */
2011 if (progress)
2012 (*progress) (0, baton);
2013
2014 while (xfered < len)
2015 {
2016 LONGEST xfer = target_write_partial (ops, object, annex,
2017 (gdb_byte *) buf + xfered,
2018 offset + xfered, len - xfered);
2019
2020 if (xfer == 0)
2021 return xfered;
2022 if (xfer < 0)
2023 return -1;
2024
2025 if (progress)
2026 (*progress) (xfer, baton);
2027
2028 xfered += xfer;
2029 QUIT;
2030 }
2031 return len;
2032 }
2033
2034 /* For docs on target_write see target.h. */
2035
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same as target_write_with_progress with no progress callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2045
2046 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2047 the size of the transferred data. PADDING additional bytes are
2048 available in *BUF_P. This is a helper function for
2049 target_read_alloc; see the declaration of that function for more
2050 information. */
2051
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* PADDING bytes of headroom are always kept free at the end of
	 the buffer for the caller (target_read_stralloc uses it for
	 the terminating NUL).  */
      n = target_read_partial (ops, object, annex, &buf[buf_pos],
			       buf_pos, buf_alloc - buf_pos - padding);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  On success, ownership of BUF passes
	     to the caller via *BUF_P (unless nothing was read).  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2104
2105 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2106 the size of the transferred data. See the declaration in "target.h"
2107 function for more information about the return value. */
2108
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding needed when the caller wants the raw bytes only.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2115
2116 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2117 returned as a string, allocated using xmalloc. If an error occurs
2118 or the transfer is unsupported, NULL is returned. Empty objects
2119 are returned as allocated but empty strings. A warning is issued
2120 if the result contains any embedded NUL bytes. */
2121
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  LONGEST transferred;

  /* Reserve one byte of padding for the terminating NUL stored
     below.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  buffer[transferred] = 0;
  /* NOTE(review): BUFFER is gdb_byte * (unsigned char *), so passing
     it to strlen relies on an implicit pointer conversion, and the
     size_t vs. LONGEST comparison mixes signedness.  Harmless here
     since TRANSFERRED > 0, but a (char *) cast would be cleaner.  */
  if (strlen (buffer) < transferred)
    warning (_("target object %d, annex %s, "
	       "contained unexpected null characters"),
	     (int) object, annex ? annex : "(none)");

  return (char *) buffer;
}
2145
2146 /* Memory transfer methods. */
2147
/* Read LEN bytes at ADDR from OPS into BUF, raising a memory error
   on any short read.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (EIO, addr);
}
2160
/* Read a LEN-byte unsigned integer at ADDR from OPS, interpreting
   the bytes with BYTE_ORDER.  LEN must not exceed sizeof (ULONGEST).  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2171
/* Insert the breakpoint described by BP_TGT via the current target.
   Returns non-zero on failure, including when breakpoint insertion
   has been disabled by the user.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      /* Non-zero signals failure to the caller.  */
      return 1;
    }

  return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
}
2184
/* Remove the breakpoint described by BP_TGT via the current target.
   Returns non-zero on failure.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
}
2201
/* Implement the "info target" command: describe every target on the
   stack that can supply memory, from the top of the stack down.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  /* Report where symbols came from, if anywhere.  */
  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      if (!(*t->to_has_memory) (t))
	continue;

      /* Skip the dummy target and anything at or below its stratum.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* Once a target higher on the stack has claimed all memory,
	 targets beneath it are not consulted while it is running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2226
2227 /* This function is called before any new inferior is created, e.g.
2228 by running a program, attaching, or connecting to a target.
2229 It cleans up any state from previous invocations which might
2230 change between runs. This is a subset of what target_preopen
2231 resets (things which might change between targets). */
2232
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of the state
     below may be reset, as it is still valid.  */
  if (!gdbarch_has_global_solist (target_gdbarch))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }
}
2265
2266 /* Callback for iterate_over_inferiors. Gets rid of the given
2267 inferior. */
2268
2269 static int
2270 dispose_inferior (struct inferior *inf, void *args)
2271 {
2272 struct thread_info *thread;
2273
2274 thread = any_thread_of_process (inf->pid);
2275 if (thread)
2276 {
2277 switch_to_thread (thread->ptid);
2278
2279 /* Core inferiors actually should be detached, not killed. */
2280 if (target_has_execution)
2281 target_kill ();
2282 else
2283 target_detach (NULL, 0);
2284 }
2285
2286 return 0;
2287 }
2288
2289 /* This is to be called by the open routine before it does
2290 anything. */
2291
/* This is to be called by the open routine before it does
   anything.  Disposes of any existing inferiors (querying the user
   first, when appropriate), pops stale targets, and resets
   per-inferior state.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  if (have_inferiors ())
    {
      /* Only ask when connected to a live process and the command
	 came from a terminal; otherwise dispose silently.  */
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum, 0);

  target_pre_inferior (from_tty);
}
2315
2316 /* Detach a target after doing deferred register stores. */
2317
void
target_detach (char *args, int from_tty)
{
  struct target_ops* t;

  if (gdbarch_has_global_breakpoints (target_gdbarch))
    /* Don't remove global breakpoints here.  They're removed on
       disconnection from the target.  */
    ;
  else
    /* If we're in breakpoints-always-inserted mode, have to remove
       them before detaching.  */
    remove_breakpoints_pid (PIDGET (inferior_ptid));

  prepare_for_detach ();

  /* Dispatch to the first target on the stack that implements
     detaching.  The debug trace is printed after the call so a
     detach that errors out leaves no misleading log line.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_detach != NULL)
	{
	  t->to_detach (t, args, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
				args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
}
2348
/* Disconnect from the current target without resuming it.  */

void
target_disconnect (char *args, int from_tty)
{
  struct target_ops *t;

  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  /* Dispatch to the first target on the stack that implements
     disconnecting.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_disconnect != NULL)
      {
	if (targetdebug)
	  fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
			      args, from_tty);
	t->to_disconnect (t, args, from_tty);
	return;
      }

  tcomplain ();
}
2371
/* Wait for an event from the inferior matching PTID, storing the
   details in *STATUS.  OPTIONS is a bitmask of TARGET_W* flags.
   Dispatches to the first target on the stack that implements
   waiting; errors with "no process" if none does.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_wait != NULL)
	{
	  ptid_t retval = (*t->to_wait) (t, ptid, status, options);

	  if (targetdebug)
	    {
	      char *status_string;

	      status_string = target_waitstatus_to_string (status);
	      fprintf_unfiltered (gdb_stdlog,
				  "target_wait (%d, status) = %d,   %s\n",
				  PIDGET (ptid), PIDGET (retval),
				  status_string);
	      xfree (status_string);
	    }

	  return retval;
	}
    }

  noprocess ();
}
2401
2402 char *
2403 target_pid_to_str (ptid_t ptid)
2404 {
2405 struct target_ops *t;
2406
2407 for (t = current_target.beneath; t != NULL; t = t->beneath)
2408 {
2409 if (t->to_pid_to_str != NULL)
2410 return (*t->to_pid_to_str) (t, ptid);
2411 }
2412
2413 return normal_pid_to_str (ptid);
2414 }
2415
/* Resume execution of PTID.  If STEP, single-step it; deliver SIGNAL
   on resumption.  Invalidates the memory cache first, and updates
   per-thread bookkeeping after the target has actually resumed.  */

void
target_resume (ptid_t ptid, int step, enum target_signal signal)
{
  struct target_ops *t;

  /* Cached memory is stale once the inferior runs again.  */
  target_dcache_invalidate ();

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_resume != NULL)
	{
	  t->to_resume (t, ptid, step, signal);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
				PIDGET (ptid),
				step ? "step" : "continue",
				target_signal_to_name (signal));

	  /* Mark the resumed threads running; this must follow the
	     actual resume so state is consistent if it throws.  */
	  registers_changed_ptid (ptid);
	  set_executing (ptid, 1);
	  set_running (ptid, 1);
	  clear_inline_frame_state (ptid);
	  return;
	}
    }

  noprocess ();
}
2444 /* Look through the list of possible targets for a target that can
2445 follow forks. */
2446
/* Look through the list of possible targets for a target that can
   follow forks.  FOLLOW_CHILD non-zero means follow the child rather
   than the parent.  Returns the implementation's result; it is an
   internal error for a fork event to arrive with no such target.  */

int
target_follow_fork (int follow_child)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_follow_fork != NULL)
	{
	  int retval = t->to_follow_fork (t, follow_child);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
				follow_child, retval);
	  return retval;
	}
    }

  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2469
2470 void
2471 target_mourn_inferior (void)
2472 {
2473 struct target_ops *t;
2474
2475 for (t = current_target.beneath; t != NULL; t = t->beneath)
2476 {
2477 if (t->to_mourn_inferior != NULL)
2478 {
2479 t->to_mourn_inferior (t);
2480 if (targetdebug)
2481 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2482
2483 /* We no longer need to keep handles on any of the object files.
2484 Make sure to release them to avoid unnecessarily locking any
2485 of them while we're not actually debugging. */
2486 bfd_cache_close_all ();
2487
2488 return;
2489 }
2490 }
2491
2492 internal_error (__FILE__, __LINE__,
2493 _("could not find a target to follow mourn inferior"));
2494 }
2495
2496 /* Look for a target which can describe architectural features, starting
2497 from TARGET. If we find one, return its description. */
2498
2499 const struct target_desc *
2500 target_read_description (struct target_ops *target)
2501 {
2502 struct target_ops *t;
2503
2504 for (t = target; t != NULL; t = t->beneath)
2505 if (t->to_read_description != NULL)
2506 {
2507 const struct target_desc *tdesc;
2508
2509 tdesc = t->to_read_description (t);
2510 if (tdesc)
2511 return tdesc;
2512 }
2513
2514 return NULL;
2515 }
2516
2517 /* The default implementation of to_search_memory.
2518 This implements a basic search of memory, reading target memory and
2519 performing the search here (as opposed to performing the search in on the
2520 target side with, for example, gdbserver). */
2521
/* The default implementation of to_search_memory.  Reads target
   memory in chunks and runs the pattern match on the host.  Returns
   1 and sets *FOUND_ADDRP if the pattern was found, 0 if not, and -1
   if a memory read failed.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match straddle two chunks
     without any special casing at the boundary.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so an oversized request can be
     reported as an error instead of aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target "
			 "memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2622
2623 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2624 sequence of bytes in PATTERN with length PATTERN_LEN.
2625
2626 The result is 1 if found, 0 if not found, and -1 if there was an error
2627 requiring halting of the search (e.g. memory read error).
2628 If the pattern is found the address is recorded in FOUND_ADDRP. */
2629
int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  struct target_ops *t;
  int found;

  /* We don't use INHERIT to set current_target.to_search_memory,
     so we have to scan the target stack and handle targetdebug
     ourselves.  */

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
			hex_string (start_addr));

  /* Find the first target on the stack with a native search
     implementation (e.g. gdbserver's remote search).  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_search_memory != NULL)
      break;

  if (t != NULL)
    {
      found = t->to_search_memory (t, start_addr, search_space_len,
				   pattern, pattern_len, found_addrp);
    }
  else
    {
      /* If a special version of to_search_memory isn't available, use the
	 simple version.  */
      found = simple_search_memory (current_target.beneath,
				    start_addr, search_space_len,
				    pattern, pattern_len, found_addrp);
    }

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "  = %d\n", found);

  return found;
}
2669
2670 /* Look through the currently pushed targets. If none of them will
2671 be able to restart the currently running process, issue an error
2672 message. */
2673
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      /* Anything else on the stack that cannot run blocks "run".  */
      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2705
2706 /* Look through the list of possible targets for a target that can
2707 execute a run or attach command without any other data. This is
2708 used to locate the default process stratum.
2709
2710 If DO_MESG is not NULL, the result is always valid (error() is
2711 called for errors); else, return NULL on error. */
2712
2713 static struct target_ops *
2714 find_default_run_target (char *do_mesg)
2715 {
2716 struct target_ops **t;
2717 struct target_ops *runable = NULL;
2718 int count;
2719
2720 count = 0;
2721
2722 for (t = target_structs; t < target_structs + target_struct_size;
2723 ++t)
2724 {
2725 if ((*t)->to_can_run && target_can_run (*t))
2726 {
2727 runable = *t;
2728 ++count;
2729 }
2730 }
2731
2732 if (count != 1)
2733 {
2734 if (do_mesg)
2735 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2736 else
2737 return NULL;
2738 }
2739
2740 return runable;
2741 }
2742
2743 void
2744 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2745 {
2746 struct target_ops *t;
2747
2748 t = find_default_run_target ("attach");
2749 (t->to_attach) (t, args, from_tty);
2750 return;
2751 }
2752
2753 void
2754 find_default_create_inferior (struct target_ops *ops,
2755 char *exec_file, char *allargs, char **env,
2756 int from_tty)
2757 {
2758 struct target_ops *t;
2759
2760 t = find_default_run_target ("run");
2761 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2762 return;
2763 }
2764
/* Default to_can_async_p: query the default run target, if any.  */

static int
find_default_can_async_p (void)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_can_async_p)
    return (t->to_can_async_p) ();
  return 0;
}
2779
/* Default to_is_async_p: query the default run target, if any.  */

static int
find_default_is_async_p (void)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_is_async_p)
    return (t->to_is_async_p) ();
  return 0;
}
2794
/* Default to_supports_non_stop: query the default run target, if
   any; otherwise non-stop is unsupported.  */

static int
find_default_supports_non_stop (void)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) ();
  return 0;
}
2805
/* Return non-zero if some target on the stack supports non-stop
   mode.  Note this starts at &current_target itself, unlike most
   dispatchers in this file which start at current_target.beneath.  */

int
target_supports_non_stop (void)
{
  struct target_ops *t;

  for (t = &current_target; t != NULL; t = t->beneath)
    if (t->to_supports_non_stop)
      return t->to_supports_non_stop ();

  return 0;
}
2817
2818
/* Fetch the target's operating-system data of kind TYPE as an
   xmalloc'd string, or NULL if unavailable.  Caller frees.  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
2837
2838 /* Determine the current address space of thread PTID. */
2839
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Prefer a target that can resolve the address space itself.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
2874
/* Default implementation: a region is hardware-watchable if it fits
   in one pointer-sized word of the target architecture.  */

static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
}
2880
2881 static int
2882 default_watchpoint_addr_within_range (struct target_ops *target,
2883 CORE_ADDR addr,
2884 CORE_ADDR start, int length)
2885 {
2886 return addr >= start && addr < start + length;
2887 }
2888
/* Default to_thread_architecture: every thread shares the global
   target architecture.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch;
}
2894
/* Stub that always yields 0; installed into target vector slots via
   function-pointer casts (see init_dummy_target).  */

static int
return_zero (void)
{
  return 0;
}
2900
/* Stub that always yields 1, for target vector slots that should
   report unconditional success/truth.  */

static int
return_one (void)
{
  return 1;
}
2906
/* Stub that always yields -1, for target vector slots whose
   "not supported" convention is a negative result.  */

static int
return_minus_one (void)
{
  return -1;
}
2912
2913 /* Find a single runnable target in the stack and return it. If for
2914 some reason there is more than one, return NULL. */
2915
2916 struct target_ops *
2917 find_run_target (void)
2918 {
2919 struct target_ops **t;
2920 struct target_ops *runable = NULL;
2921 int count;
2922
2923 count = 0;
2924
2925 for (t = target_structs; t < target_structs + target_struct_size; ++t)
2926 {
2927 if ((*t)->to_can_run && target_can_run (*t))
2928 {
2929 runable = *t;
2930 ++count;
2931 }
2932 }
2933
2934 return (count == 1 ? runable : NULL);
2935 }
2936
/* Find the next target down the stack from the specified target.  */
2940
struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* The target stack is a singly linked list; the target beneath T
     is simply its successor.  */
  return t->beneath;
}
2946
2947 \f
2948 /* The inferior process has died. Long live the inferior! */
2949
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid before tearing anything down, so nothing
     below acts on the dead process.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Reset breakpoint and register state, then per-executable
     caches.  */
  breakpoint_init_inferior (inf_exited);
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
2973 \f
2974 /* Helper function for child_wait and the derivatives of child_wait.
2975 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
2976 translation of that in OURSTATUS. */
/* Helper function for child_wait and the derivatives of child_wait.
   HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
   translation of that in OURSTATUS.  */
void
store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
{
  if (WIFEXITED (hoststatus))
    {
      ourstatus->kind = TARGET_WAITKIND_EXITED;
      ourstatus->value.integer = WEXITSTATUS (hoststatus);
    }
  else if (!WIFSTOPPED (hoststatus))
    {
      /* Neither exited nor stopped: the process was terminated by a
	 signal.  */
      ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
      ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
    }
  else
    {
      ourstatus->kind = TARGET_WAITKIND_STOPPED;
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
    }
}
2996 \f
2997 /* Convert a normal process ID to a string. Returns the string in a
2998 static buffer. */
2999
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer -- the result is only valid until the next call.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3008
/* to_pid_to_str for the dummy target: just use the generic
   "process N" formatting.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3014
3015 /* Error-catcher for target_find_memory_regions. */
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error does not return.  */
  return 0;
}
3022
3023 /* Error-catcher for target_make_corefile_notes. */
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error does not return.  */
  return NULL;
}
3030
3031 /* Error-catcher for target_get_bookmark. */
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  /* Not reached; tcomplain throws.  */
  return NULL;
}
3038
3039 /* Error-catcher for target_goto_bookmark. */
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3045
3046 /* Set up the handful of non-empty slots needed by the dummy target
3047 vector. */
3048
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the stack and
   provides "no inferior" defaults for every operation.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  /* Detaching from nothing is a no-op.  */
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* With no inferior, the dummy target has no memory, stack,
     registers, or execution; the casts adapt return_zero's
     signature to each slot's type.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3079 \f
3080 static void
3081 debug_to_open (char *args, int from_tty)
3082 {
3083 debug_target.to_open (args, from_tty);
3084
3085 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3086 }
3087
/* Close TARG.  Prefer the to_xclose variant, which receives the
   target itself; otherwise fall back to the older to_close hook.  */

void
target_close (struct target_ops *targ, int quitting)
{
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ, quitting);
  else if (targ->to_close != NULL)
    targ->to_close (quitting);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
}
3099
/* Attach to the process described by ARGS via the first target on
   the stack that implements attaching.  */

void
target_attach (char *args, int from_tty)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_attach != NULL)
	{
	  t->to_attach (t, args, from_tty);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
				args, from_tty);
	  return;
	}
    }

  internal_error (__FILE__, __LINE__,
		  _("could not find a target to attach"));
}
3120
/* Return non-zero if thread PTID is still alive, as reported by the
   first target on the stack that can tell; zero if no target can.  */

int
target_thread_alive (ptid_t ptid)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_alive != NULL)
	{
	  int retval;

	  retval = t->to_thread_alive (t, ptid);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
				PIDGET (ptid), retval);

	  return retval;
	}
    }

  return 0;
}
3143
/* Ask the first capable target on the stack to update GDB's thread
   list.  Silently does nothing if no target implements it.  */

void
target_find_new_threads (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_find_new_threads != NULL)
	{
	  t->to_find_new_threads (t);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");

	  return;
	}
    }
}
3161
/* Stop thread(s) matching PTID, honoring the user's "may-stop"
   permission setting.  */

void
target_stop (ptid_t ptid)
{
  if (!may_stop)
    {
      warning (_("May not interrupt or stop the target, ignoring attempt"));
      return;
    }

  (*current_target.to_stop) (ptid);
}
3173
/* Debug wrapper for to_post_attach: call through and then log.  */

static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3181
3182 /* Return a pretty printed form of target_waitstatus.
3183 Space for the result is malloc'd, caller must free. */
3184
/* Return a pretty printed form of target_waitstatus.
   Space for the result is malloc'd, caller must free.  */

char *
target_waitstatus_to_string (const struct target_waitstatus *ws)
{
  const char *kind_str = "status->kind = ";

  switch (ws->kind)
    {
    case TARGET_WAITKIND_EXITED:
      return xstrprintf ("%sexited, status = %d",
			 kind_str, ws->value.integer);
    case TARGET_WAITKIND_STOPPED:
      return xstrprintf ("%sstopped, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_SIGNALLED:
      return xstrprintf ("%ssignalled, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_LOADED:
      return xstrprintf ("%sloaded", kind_str);
    case TARGET_WAITKIND_FORKED:
      return xstrprintf ("%sforked", kind_str);
    case TARGET_WAITKIND_VFORKED:
      return xstrprintf ("%svforked", kind_str);
    case TARGET_WAITKIND_EXECD:
      return xstrprintf ("%sexecd", kind_str);
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      return xstrprintf ("%sentered syscall", kind_str);
    case TARGET_WAITKIND_SYSCALL_RETURN:
      return xstrprintf ("%sexited syscall", kind_str);
    case TARGET_WAITKIND_SPURIOUS:
      return xstrprintf ("%sspurious", kind_str);
    case TARGET_WAITKIND_IGNORE:
      return xstrprintf ("%signore", kind_str);
    case TARGET_WAITKIND_NO_HISTORY:
      return xstrprintf ("%sno-history", kind_str);
    default:
      /* Unrecognized kinds still produce a parseable line.  */
      return xstrprintf ("%sunknown???", kind_str);
    }
}
3223
/* Log a register access performed by FUNC to gdb_stdlog: the
   register's name (or number), its raw bytes, and -- when it fits in
   a LONGEST -- its value in hex and decimal.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in target byte order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3260
/* Fetch register REGNO (or all registers if REGNO is -1) into
   REGCACHE via the first capable target.  Note: unlike
   target_store_registers, this silently does nothing when no target
   implements fetching.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_fetch_registers != NULL)
	{
	  t->to_fetch_registers (t, regcache, regno);
	  if (targetdebug)
	    debug_print_register ("target_fetch_registers", regcache, regno);
	  return;
	}
    }
}
3277
3278 void
3279 target_store_registers (struct regcache *regcache, int regno)
3280 {
3281 struct target_ops *t;
3282
3283 if (!may_write_registers)
3284 error (_("Writing to registers is not allowed (regno %d)"), regno);
3285
3286 for (t = current_target.beneath; t != NULL; t = t->beneath)
3287 {
3288 if (t->to_store_registers != NULL)
3289 {
3290 t->to_store_registers (t, regcache, regno);
3291 if (targetdebug)
3292 {
3293 debug_print_register ("target_store_registers", regcache, regno);
3294 }
3295 return;
3296 }
3297 }
3298
3299 noprocess ();
3300 }
3301
3302 int
3303 target_core_of_thread (ptid_t ptid)
3304 {
3305 struct target_ops *t;
3306
3307 for (t = current_target.beneath; t != NULL; t = t->beneath)
3308 {
3309 if (t->to_core_of_thread != NULL)
3310 {
3311 int retval = t->to_core_of_thread (t, ptid);
3312
3313 if (targetdebug)
3314 fprintf_unfiltered (gdb_stdlog,
3315 "target_core_of_thread (%d) = %d\n",
3316 PIDGET (ptid), retval);
3317 return retval;
3318 }
3319 }
3320
3321 return -1;
3322 }
3323
3324 int
3325 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3326 {
3327 struct target_ops *t;
3328
3329 for (t = current_target.beneath; t != NULL; t = t->beneath)
3330 {
3331 if (t->to_verify_memory != NULL)
3332 {
3333 int retval = t->to_verify_memory (t, data, memaddr, size);
3334
3335 if (targetdebug)
3336 fprintf_unfiltered (gdb_stdlog,
3337 "target_verify_memory (%s, %s) = %d\n",
3338 paddress (target_gdbarch, memaddr),
3339 pulongest (size),
3340 retval);
3341 return retval;
3342 }
3343 }
3344
3345 tcomplain ();
3346 }
3347
/* targetdebug wrapper: forward to the real to_prepare_to_store and
   log the call to gdb_stdlog.  */

static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3355
/* targetdebug wrapper around the deprecated xfer_memory method:
   forward the transfer, then log the request and (on success) the
   bytes transferred.  With targetdebug < 2 only the first line of
   bytes is printed, followed by "...".  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the output line whenever the HOST address of the
	     current byte is 16-byte aligned.  NOTE(review): this keys
	     off &myaddr[i] rather than I or the target address, so
	     line breaks depend on the buffer's alignment -- confirm
	     this is intended.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3396
/* targetdebug wrapper: forward to the real to_files_info and log the
   call.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3404
3405 static int
3406 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3407 struct bp_target_info *bp_tgt)
3408 {
3409 int retval;
3410
3411 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3412
3413 fprintf_unfiltered (gdb_stdlog,
3414 "target_insert_breakpoint (%s, xxx) = %ld\n",
3415 core_addr_to_string (bp_tgt->placed_address),
3416 (unsigned long) retval);
3417 return retval;
3418 }
3419
3420 static int
3421 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3422 struct bp_target_info *bp_tgt)
3423 {
3424 int retval;
3425
3426 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3427
3428 fprintf_unfiltered (gdb_stdlog,
3429 "target_remove_breakpoint (%s, xxx) = %ld\n",
3430 core_addr_to_string (bp_tgt->placed_address),
3431 (unsigned long) retval);
3432 return retval;
3433 }
3434
3435 static int
3436 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3437 {
3438 int retval;
3439
3440 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3441
3442 fprintf_unfiltered (gdb_stdlog,
3443 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3444 (unsigned long) type,
3445 (unsigned long) cnt,
3446 (unsigned long) from_tty,
3447 (unsigned long) retval);
3448 return retval;
3449 }
3450
3451 static int
3452 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3453 {
3454 CORE_ADDR retval;
3455
3456 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3457
3458 fprintf_unfiltered (gdb_stdlog,
3459 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3460 core_addr_to_string (addr), (unsigned long) len,
3461 core_addr_to_string (retval));
3462 return retval;
3463 }
3464
3465 static int
3466 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
3467 struct expression *cond)
3468 {
3469 int retval;
3470
3471 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
3472 rw, cond);
3473
3474 fprintf_unfiltered (gdb_stdlog,
3475 "target_can_accel_watchpoint_condition "
3476 "(%s, %d, %d, %s) = %ld\n",
3477 core_addr_to_string (addr), len, rw,
3478 host_address_to_string (cond), (unsigned long) retval);
3479 return retval;
3480 }
3481
3482 static int
3483 debug_to_stopped_by_watchpoint (void)
3484 {
3485 int retval;
3486
3487 retval = debug_target.to_stopped_by_watchpoint ();
3488
3489 fprintf_unfiltered (gdb_stdlog,
3490 "target_stopped_by_watchpoint () = %ld\n",
3491 (unsigned long) retval);
3492 return retval;
3493 }
3494
3495 static int
3496 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3497 {
3498 int retval;
3499
3500 retval = debug_target.to_stopped_data_address (target, addr);
3501
3502 fprintf_unfiltered (gdb_stdlog,
3503 "target_stopped_data_address ([%s]) = %ld\n",
3504 core_addr_to_string (*addr),
3505 (unsigned long)retval);
3506 return retval;
3507 }
3508
3509 static int
3510 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3511 CORE_ADDR addr,
3512 CORE_ADDR start, int length)
3513 {
3514 int retval;
3515
3516 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3517 start, length);
3518
3519 fprintf_filtered (gdb_stdlog,
3520 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3521 core_addr_to_string (addr), core_addr_to_string (start),
3522 length, retval);
3523 return retval;
3524 }
3525
3526 static int
3527 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3528 struct bp_target_info *bp_tgt)
3529 {
3530 int retval;
3531
3532 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3533
3534 fprintf_unfiltered (gdb_stdlog,
3535 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3536 core_addr_to_string (bp_tgt->placed_address),
3537 (unsigned long) retval);
3538 return retval;
3539 }
3540
3541 static int
3542 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3543 struct bp_target_info *bp_tgt)
3544 {
3545 int retval;
3546
3547 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3548
3549 fprintf_unfiltered (gdb_stdlog,
3550 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3551 core_addr_to_string (bp_tgt->placed_address),
3552 (unsigned long) retval);
3553 return retval;
3554 }
3555
3556 static int
3557 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
3558 struct expression *cond)
3559 {
3560 int retval;
3561
3562 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
3563
3564 fprintf_unfiltered (gdb_stdlog,
3565 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3566 core_addr_to_string (addr), len, type,
3567 host_address_to_string (cond), (unsigned long) retval);
3568 return retval;
3569 }
3570
3571 static int
3572 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
3573 struct expression *cond)
3574 {
3575 int retval;
3576
3577 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
3578
3579 fprintf_unfiltered (gdb_stdlog,
3580 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3581 core_addr_to_string (addr), len, type,
3582 host_address_to_string (cond), (unsigned long) retval);
3583 return retval;
3584 }
3585
/* targetdebug wrapper: forward to the real to_terminal_init and log
   the call.  */

static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
3593
/* targetdebug wrapper: forward to the real to_terminal_inferior and
   log the call.  */

static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
3601
/* targetdebug wrapper: forward to the real to_terminal_ours_for_output
   and log the call.  */

static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
3609
/* targetdebug wrapper: forward to the real to_terminal_ours and log
   the call.  */

static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
3617
/* targetdebug wrapper: forward to the real to_terminal_save_ours and
   log the call.  */

static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
3625
/* targetdebug wrapper: forward to the real to_terminal_info and log
   the call.  NOTE(review): ARG is printed with %s -- confirm callers
   never pass NULL here, since passing NULL to %s is undefined.  */

static void
debug_to_terminal_info (char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
3634
/* targetdebug wrapper: forward to the real to_load and log the
   call.  */

static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
3642
/* targetdebug wrapper: forward to the real to_lookup_symbol and log
   the symbol name being looked up.  */

static int
debug_to_lookup_symbol (char *name, CORE_ADDR *addrp)
{
  int retval;

  retval = debug_target.to_lookup_symbol (name, addrp);

  fprintf_unfiltered (gdb_stdlog, "target_lookup_symbol (%s, xxx)\n", name);

  return retval;
}
3654
/* targetdebug wrapper: forward to the real to_post_startup_inferior
   and log the inferior's pid.  */

static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      PIDGET (ptid));
}
3663
/* targetdebug wrapper: forward to the real to_insert_fork_catchpoint
   and log pid and result.  */

static int
debug_to_insert_fork_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_insert_fork_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3676
/* targetdebug wrapper: forward to the real to_remove_fork_catchpoint
   and log pid and result.  */

static int
debug_to_remove_fork_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_remove_fork_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3689
/* targetdebug wrapper: forward to the real to_insert_vfork_catchpoint
   and log pid and result.  */

static int
debug_to_insert_vfork_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_insert_vfork_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3702
/* targetdebug wrapper: forward to the real to_remove_vfork_catchpoint
   and log pid and result.  */

static int
debug_to_remove_vfork_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_remove_vfork_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3715
/* targetdebug wrapper: forward to the real to_insert_exec_catchpoint
   and log pid and result.  */

static int
debug_to_insert_exec_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_insert_exec_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3728
/* targetdebug wrapper: forward to the real to_remove_exec_catchpoint
   and log pid and result.  */

static int
debug_to_remove_exec_catchpoint (int pid)
{
  int retval;

  retval = debug_target.to_remove_exec_catchpoint (pid);

  fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
		      pid, retval);

  return retval;
}
3741
/* targetdebug wrapper: forward to the real to_has_exited and log
   arguments and result.  NOTE(review): *EXIT_STATUS is printed
   unconditionally -- confirm the callee always stores to it even
   when the process has not exited.  */

static int
debug_to_has_exited (int pid, int wait_status, int *exit_status)
{
  int has_exited;

  has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);

  fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
		      pid, wait_status, *exit_status, has_exited);

  return has_exited;
}
3754
/* targetdebug wrapper: forward to the real to_can_run and log the
   result.  */

static int
debug_to_can_run (void)
{
  int retval;

  retval = debug_target.to_can_run ();

  fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);

  return retval;
}
3766
/* targetdebug wrapper: forward to the real to_notice_signals and log
   the pid.  */

static void
debug_to_notice_signals (ptid_t ptid)
{
  debug_target.to_notice_signals (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_notice_signals (%d)\n",
		      PIDGET (ptid));
}
3775
/* targetdebug wrapper: forward to the real to_thread_architecture and
   log the resulting gdbarch.  NOTE(review): RETVAL is dereferenced
   via gdbarch_bfd_arch_info without a NULL check -- confirm the
   method cannot return NULL.  */

static struct gdbarch *
debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  struct gdbarch *retval;

  retval = debug_target.to_thread_architecture (ops, ptid);

  fprintf_unfiltered (gdb_stdlog,
		      "target_thread_architecture (%s) = %s [%s]\n",
		      target_pid_to_str (ptid),
		      host_address_to_string (retval),
		      gdbarch_bfd_arch_info (retval)->printable_name);
  return retval;
}
3790
/* targetdebug wrapper: forward to the real to_stop and log the
   ptid.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
3799
/* targetdebug wrapper: forward to the real to_rcmd and log the
   command string.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
3807
/* targetdebug wrapper: forward to the real to_pid_to_exec_file and
   log pid and result.  NOTE(review): EXEC_FILE may be NULL and is
   printed with %s -- passing NULL to %s is undefined on some hosts;
   confirm this path is only hit with a valid result.  */

static char *
debug_to_pid_to_exec_file (int pid)
{
  char *exec_file;

  exec_file = debug_target.to_pid_to_exec_file (pid);

  fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
		      pid, exec_file);

  return exec_file;
}
3820
/* Interpose the debug_to_* logging wrappers in front of the current
   target.  The real target vector is first saved in debug_target so
   the wrappers can forward to the genuine methods.  Called when
   "set debug target" is non-zero.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_lookup_symbol = debug_to_lookup_symbol;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_notice_signals = debug_to_notice_signals;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
3870 \f
3871
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
3876
/* Implement the "monitor" command: pass CMD to the current target's
   to_rcmd method.  Errors out when no layer provides a real
   implementation, i.e. when to_rcmd is still the tcomplain
   placeholder -- either directly, or hidden behind the debug wrapper
   (in which case the saved debug_target vector is checked).  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
3889
/* Print the name of each layer of our target stack.  */
3891
3892 static void
3893 maintenance_print_target_stack (char *cmd, int from_tty)
3894 {
3895 struct target_ops *t;
3896
3897 printf_filtered (_("The current target stack is:\n"));
3898
3899 for (t = target_stack; t != NULL; t = t->beneath)
3900 {
3901 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3902 }
3903 }
3904
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;
3911
/* Implement "set target-async".  Refuses to change the setting while
   there are live inferiors; in that case the command variable is
   restored to the current effective value before erroring out.  */

static void
set_maintenance_target_async_permitted (char *args, int from_tty,
					struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
3924
/* Implement "show target-async".  */

static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
3934
/* Temporary copies of permission settings.  The "set may-*" commands
   write to these; the values are copied into the real may_* globals
   only when it is safe to do so (see set_target_permissions).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
3943
/* Make the user-set values match the real values again.  Used to
   undo a pending "set may-*" change that turned out to be
   disallowed.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
3956
/* The one function handles (most of) the permission flags in the same
   way: refuse changes while the inferior is running, otherwise copy
   the user-set shadow values into the real flags.  may_write_memory
   is deliberately not handled here -- it has its own setter
   (set_write_memory_permission) so it can change independently of
   observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the shadow variables back so the rejected "set" is not
	 remembered.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
3978
/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
3989
3990
/* Set up the initial (dummy) target and register all target-related
   commands and settings.  Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always some target to consult.  */
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions. This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* The "may-*" permission flags below all funnel through
     set_target_permissions, except may-write-memory which has its
     own setter so it can vary independently of observer mode.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);


  target_dcache = dcache_init ();
}
This page took 0.148793 seconds and 4 git commands to generate.