2012-01-19 Pedro Alves <palves@redhat.com>
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdb_wait.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45
46 static void target_info (char *, int);
47
48 static void default_terminal_info (char *, int);
49
50 static int default_watchpoint_addr_within_range (struct target_ops *,
51 CORE_ADDR, CORE_ADDR, int);
52
53 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
54
55 static void tcomplain (void) ATTRIBUTE_NORETURN;
56
57 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
58
59 static int return_zero (void);
60
61 static int return_one (void);
62
63 static int return_minus_one (void);
64
65 void target_ignore (void);
66
67 static void target_command (char *, int);
68
69 static struct target_ops *find_default_run_target (char *);
70
71 static LONGEST default_xfer_partial (struct target_ops *ops,
72 enum target_object object,
73 const char *annex, gdb_byte *readbuf,
74 const gdb_byte *writebuf,
75 ULONGEST offset, LONGEST len);
76
77 static LONGEST current_xfer_partial (struct target_ops *ops,
78 enum target_object object,
79 const char *annex, gdb_byte *readbuf,
80 const gdb_byte *writebuf,
81 ULONGEST offset, LONGEST len);
82
83 static LONGEST target_xfer_partial (struct target_ops *ops,
84 enum target_object object,
85 const char *annex,
86 void *readbuf, const void *writebuf,
87 ULONGEST offset, LONGEST len);
88
89 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
90 ptid_t ptid);
91
92 static void init_dummy_target (void);
93
94 static struct target_ops debug_target;
95
96 static void debug_to_open (char *, int);
97
98 static void debug_to_prepare_to_store (struct regcache *);
99
100 static void debug_to_files_info (struct target_ops *);
101
102 static int debug_to_insert_breakpoint (struct gdbarch *,
103 struct bp_target_info *);
104
105 static int debug_to_remove_breakpoint (struct gdbarch *,
106 struct bp_target_info *);
107
108 static int debug_to_can_use_hw_breakpoint (int, int, int);
109
110 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
111 struct bp_target_info *);
112
113 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
114 struct bp_target_info *);
115
116 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
120 struct expression *);
121
122 static int debug_to_stopped_by_watchpoint (void);
123
124 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
125
126 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
127 CORE_ADDR, CORE_ADDR, int);
128
129 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
132 struct expression *);
133
134 static void debug_to_terminal_init (void);
135
136 static void debug_to_terminal_inferior (void);
137
138 static void debug_to_terminal_ours_for_output (void);
139
140 static void debug_to_terminal_save_ours (void);
141
142 static void debug_to_terminal_ours (void);
143
144 static void debug_to_terminal_info (char *, int);
145
146 static void debug_to_load (char *, int);
147
148 static int debug_to_can_run (void);
149
150 static void debug_to_stop (ptid_t);
151
152 /* Pointer to array of target architecture structures; the size of the
153 array; the current index into the array; the allocated size of the
154 array. */
155 struct target_ops **target_structs;
156 unsigned target_struct_size;
157 unsigned target_struct_index;
158 unsigned target_struct_allocsize;
159 #define DEFAULT_ALLOCSIZE 10
160
161 /* The initial current target, so that there is always a semi-valid
162 current target. */
163
164 static struct target_ops dummy_target;
165
166 /* Top of target stack. */
167
168 static struct target_ops *target_stack;
169
170 /* The target structure we are currently using to talk to a process
171 or file or whatever "inferior" we have. */
172
173 struct target_ops current_target;
174
175 /* Command list for target. */
176
177 static struct cmd_list_element *targetlist = NULL;
178
179 /* Nonzero if we should trust readonly sections from the
180 executable when reading memory. */
181
182 static int trust_readonly = 0;
183
184 /* Nonzero if we should show true memory content including
185 memory breakpoint inserted by gdb. */
186
187 static int show_memory_breakpoints = 0;
188
189 /* These globals control whether GDB attempts to perform these
190 operations; they are useful for targets that need to prevent
191 inadvertant disruption, such as in non-stop mode. */
192
193 int may_write_registers = 1;
194
195 int may_write_memory = 1;
196
197 int may_insert_breakpoints = 1;
198
199 int may_insert_tracepoints = 1;
200
201 int may_insert_fast_tracepoints = 1;
202
203 int may_stop = 1;
204
205 /* Non-zero if we want to see trace of target level stuff. */
206
207 static int targetdebug = 0;
static void
show_targetdebug (struct ui_file *file, int from_tty,
                  struct cmd_list_element *c, const char *value)
{
  /* "show debug target" callback; VALUE is the setting already
     rendered as a string by the cli machinery.  */
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
214
215 static void setup_target_debug (void);
216
217 /* The option sets this. */
218 static int stack_cache_enabled_p_1 = 1;
219 /* And set_stack_cache_enabled_p updates this.
220 The reason for the separation is so that we don't flush the cache for
221 on->on transitions. */
222 static int stack_cache_enabled_p = 1;
223
224 /* This is called *after* the stack-cache has been set.
225 Flush the cache for off->on and on->off transitions.
226 There's no real need to flush the cache for on->off transitions,
227 except cleanliness. */
228
static void
set_stack_cache_enabled_p (char *args, int from_tty,
                           struct cmd_list_element *c)
{
  /* ARGS/FROM_TTY are unused; the cli has already stored the new
     value in stack_cache_enabled_p_1.  Only flush the dcache on a
     real off<->on transition, not on a no-op re-set.  */
  if (stack_cache_enabled_p != stack_cache_enabled_p_1)
    target_dcache_invalidate ();

  stack_cache_enabled_p = stack_cache_enabled_p_1;
}
238
static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  /* "show stack-cache" callback.  */
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
245
246 /* Cache of memory operations, to speed up remote access. */
247 static DCACHE *target_dcache;
248
249 /* Invalidate the target dcache. */
250
void
target_dcache_invalidate (void)
{
  /* Drop all cached target memory; callers use this whenever target
     memory may have changed behind the cache's back.  */
  dcache_invalidate (target_dcache);
}
256
257 /* The user just typed 'target' without the name of a target. */
258
static void
target_command (char *arg, int from_tty)
{
  /* Bare "target" with no target name: just print usage help.  */
  fputs_filtered ("Argument required (target name). Try `help target'\n",
                  gdb_stdout);
}
265
266 /* Default target_has_* methods for process_stratum targets. */
267
268 int
269 default_child_has_all_memory (struct target_ops *ops)
270 {
271 /* If no inferior selected, then we can't read memory here. */
272 if (ptid_equal (inferior_ptid, null_ptid))
273 return 0;
274
275 return 1;
276 }
277
278 int
279 default_child_has_memory (struct target_ops *ops)
280 {
281 /* If no inferior selected, then we can't read memory here. */
282 if (ptid_equal (inferior_ptid, null_ptid))
283 return 0;
284
285 return 1;
286 }
287
288 int
289 default_child_has_stack (struct target_ops *ops)
290 {
291 /* If no inferior selected, there's no stack. */
292 if (ptid_equal (inferior_ptid, null_ptid))
293 return 0;
294
295 return 1;
296 }
297
298 int
299 default_child_has_registers (struct target_ops *ops)
300 {
301 /* Can't read registers from no inferior. */
302 if (ptid_equal (inferior_ptid, null_ptid))
303 return 0;
304
305 return 1;
306 }
307
308 int
309 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
310 {
311 /* If there's no thread selected, then we can't make it run through
312 hoops. */
313 if (ptid_equal (the_ptid, null_ptid))
314 return 0;
315
316 return 1;
317 }
318
319
320 int
321 target_has_all_memory_1 (void)
322 {
323 struct target_ops *t;
324
325 for (t = current_target.beneath; t != NULL; t = t->beneath)
326 if (t->to_has_all_memory (t))
327 return 1;
328
329 return 0;
330 }
331
332 int
333 target_has_memory_1 (void)
334 {
335 struct target_ops *t;
336
337 for (t = current_target.beneath; t != NULL; t = t->beneath)
338 if (t->to_has_memory (t))
339 return 1;
340
341 return 0;
342 }
343
344 int
345 target_has_stack_1 (void)
346 {
347 struct target_ops *t;
348
349 for (t = current_target.beneath; t != NULL; t = t->beneath)
350 if (t->to_has_stack (t))
351 return 1;
352
353 return 0;
354 }
355
356 int
357 target_has_registers_1 (void)
358 {
359 struct target_ops *t;
360
361 for (t = current_target.beneath; t != NULL; t = t->beneath)
362 if (t->to_has_registers (t))
363 return 1;
364
365 return 0;
366 }
367
368 int
369 target_has_execution_1 (ptid_t the_ptid)
370 {
371 struct target_ops *t;
372
373 for (t = current_target.beneath; t != NULL; t = t->beneath)
374 if (t->to_has_execution (t, the_ptid))
375 return 1;
376
377 return 0;
378 }
379
int
target_has_execution_current (void)
{
  /* Convenience wrapper: query execution for the current inferior.  */
  return target_has_execution_1 (inferior_ptid);
}
385
386 /* Add a possible target architecture to the list. */
387
void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Register T in the global table, growing it on demand (doubling
     strategy, starting at DEFAULT_ALLOCSIZE slots).  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registration also creates the "target" prefix command;
     every target then becomes a subcommand named by its shortname.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
435
436 /* Stub functions */
437
void
target_ignore (void)
{
  /* Deliberate no-op; used (suitably cast) as the default for many
     optional target methods.  */
}
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
void
target_load (char *arg, int from_tty)
{
  /* Loading rewrites target memory, so cached data is stale.  */
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
467
468 void
469 target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471 {
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489 }
490
void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
505
static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  /* Default deprecated_xfer_memory implementation: the target can
     transfer no memory at all.  */
  errno = EIO;		/* Can't read/write this location.  */
  return 0;		/* No bytes handled.  */
}
513
static void
tcomplain (void)
{
  /* Error out (throws) for operations the current target does not
     support; used as a default for many methods.  */
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
520
void
noprocess (void)
{
  /* Error out (throws) for operations that require a live process.  */
  error (_("You can't do that without a process to debug."));
}
526
static void
default_terminal_info (char *args, int from_tty)
{
  /* Default to_terminal_info: nothing was ever saved to report.  */
  printf_unfiltered (_("No saved terminal information.\n"));
}
532
533 /* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  /* Build the ptid from the caller's LWP and TID, borrowing the pid
     from the current inferior.  */
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
544
545 static enum exec_direction_kind
546 default_execution_direction (void)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565 Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* INHERIT copies FIELD from TARGET into current_target, but only if
     no stratum higher on the stack has already provided it.  */
#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      /* Do not inherit to_insert_mask_watchpoint.  */
      /* Do not inherit to_remove_mask_watchpoint.  */
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      /* Do not inherit to_masked_watch_num_registers.  */
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_execution_direction, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_supports_enable_disable_tracepoint, t);
      INHERIT (to_supports_string_tracing, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_can_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_enable_tracepoint, t);
      INHERIT (to_disable_tracepoint, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_get_tracepoint_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_get_min_fast_tracepoint_insn_len, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_set_trace_notes, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_magic, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field)	       \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is never inherited; it is always the router that
     searches the stack itself.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_enable_disable_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_supports_string_tracing,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_can_download_tracepoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_enable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_disable_tracepoint,
	    (void (*) (struct bp_location *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_get_tracepoint_status,
	    (void (*) (struct breakpoint *, struct uploaded_tp *))
	    tcomplain);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_get_min_fast_tracepoint_insn_len,
	    (int (*) (void))
	    return_minus_one);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_trace_notes,
	    (int (*) (char *, char *, char *))
	    return_zero);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    tcomplain);
  de_fault (to_execution_direction, default_execution_direction);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
939
940 /* Push a new target type into the stack of the existing target accessors,
941 possibly superseding some of the existing accessors.
942
943 Rather than allow an empty stack, we always have the dummy target at
944 the bottom stratum, so we can call the function vectors without
945 checking them. */
946
void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in.  CUR ends up
     pointing at the link where T must be spliced in: the first slot
     whose target sits at the same or a lower stratum.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Re-derive current_target from the modified stack.  */
  update_current_target ();
}
990
991 /* Remove a target_ops vector from the stack, wherever it may be.
992 Return how many times it was removed (0 or 1). */
993
int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target always stays at the bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t, 0);

  return 1;
}
1032
1033 void
1034 pop_target (void)
1035 {
1036 target_close (target_stack, 0); /* Let it clean up. */
1037 if (unpush_target (target_stack) == 1)
1038 return;
1039
1040 fprintf_unfiltered (gdb_stderr,
1041 "pop_target couldn't find target %s\n",
1042 current_target.to_shortname);
1043 internal_error (__FILE__, __LINE__,
1044 _("failed internal consistency check"));
1045 }
1046
/* Pop and close every target whose stratum is strictly above
   ABOVE_STRATUM.  QUITTING is passed through to target_close so the
   target can tell whether GDB is exiting.  */

void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      /* NOTE(review): unpush_target below also calls target_close
	 (with quitting==0) on the target it removes, so the target
	 is closed twice -- once here with QUITTING, once there
	 without.  Confirm whether this explicit close can be removed
	 without losing the QUITTING semantics.  */
      target_close (target_stack, quitting);
      if (!unpush_target (target_stack))
	{
	  /* The stratum test claimed a target exists above
	     ABOVE_STRATUM, so a failed unpush means the stack is
	     inconsistent; report and stop rather than loop forever.  */
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1064
/* Pop and close every target except the permanent dummy target at
   the bottom of the stack.  QUITTING is forwarded to target_close.  */

void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}
1070
1071 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1072
1073 int
1074 target_is_pushed (struct target_ops *t)
1075 {
1076 struct target_ops **cur;
1077
1078 /* Check magic number. If wrong, it probably means someone changed
1079 the struct definition, but not all the places that initialize one. */
1080 if (t->to_magic != OPS_MAGIC)
1081 {
1082 fprintf_unfiltered (gdb_stderr,
1083 "Magic number of %s target struct wrong\n",
1084 t->to_shortname);
1085 internal_error (__FILE__, __LINE__,
1086 _("failed internal consistency check"));
1087 }
1088
1089 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1090 if (*cur == t)
1091 return 1;
1092
1093 return 0;
1094 }
1095
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws
   (via error) with a user-oriented message on failure.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile: assigned inside TRY_CATCH, read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target in the stack that can resolve thread-local
     addresses.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  /* Tailor the message both to the failure mode and to
	     whether OBJFILE is a shared library or the executable.  */
	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
		         " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
		         " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the shared library `%s'\n"
		         "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the executable `%s'\n"
		         "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
		         "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
		         "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      /* Not a TLS-specific failure; re-throw for an outer
		 handler.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1192
1193 #undef MIN
1194 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1195
1196 /* target_read_string -- read a null terminated string, up to LEN bytes,
1197 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1198 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1199 is responsible for freeing it. Return the number of bytes successfully
1200 read. */
1201
1202 int
1203 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1204 {
1205 int tlen, origlen, offset, i;
1206 gdb_byte buf[4];
1207 int errcode = 0;
1208 char *buffer;
1209 int buffer_allocated;
1210 char *bufptr;
1211 unsigned int nbytes_read = 0;
1212
1213 gdb_assert (string);
1214
1215 /* Small for testing. */
1216 buffer_allocated = 4;
1217 buffer = xmalloc (buffer_allocated);
1218 bufptr = buffer;
1219
1220 origlen = len;
1221
1222 while (len > 0)
1223 {
1224 tlen = MIN (len, 4 - (memaddr & 3));
1225 offset = memaddr & 3;
1226
1227 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1228 if (errcode != 0)
1229 {
1230 /* The transfer request might have crossed the boundary to an
1231 unallocated region of memory. Retry the transfer, requesting
1232 a single byte. */
1233 tlen = 1;
1234 offset = 0;
1235 errcode = target_read_memory (memaddr, buf, 1);
1236 if (errcode != 0)
1237 goto done;
1238 }
1239
1240 if (bufptr - buffer + tlen > buffer_allocated)
1241 {
1242 unsigned int bytes;
1243
1244 bytes = bufptr - buffer;
1245 buffer_allocated *= 2;
1246 buffer = xrealloc (buffer, buffer_allocated);
1247 bufptr = buffer + bytes;
1248 }
1249
1250 for (i = 0; i < tlen; i++)
1251 {
1252 *bufptr++ = buf[i + offset];
1253 if (buf[i + offset] == '\000')
1254 {
1255 nbytes_read += i + 1;
1256 goto done;
1257 }
1258 }
1259
1260 memaddr += tlen;
1261 len -= tlen;
1262 nbytes_read += tlen;
1263 }
1264 done:
1265 *string = buffer;
1266 if (errnop != NULL)
1267 *errnop = errcode;
1268 return nbytes_read;
1269 }
1270
1271 struct target_section_table *
1272 target_get_section_table (struct target_ops *target)
1273 {
1274 struct target_ops *t;
1275
1276 if (targetdebug)
1277 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1278
1279 for (t = target; t != NULL; t = t->beneath)
1280 if (t->to_get_section_table != NULL)
1281 return (*t->to_get_section_table) (t);
1282
1283 return NULL;
1284 }
1285
1286 /* Find a section containing ADDR. */
1287
1288 struct target_section *
1289 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1290 {
1291 struct target_section_table *table = target_get_section_table (target);
1292 struct target_section *secp;
1293
1294 if (table == NULL)
1295 return NULL;
1296
1297 for (secp = table->sections; secp < table->sections_end; secp++)
1298 {
1299 if (addr >= secp->addr && addr < secp->endaddr)
1300 return secp;
1301 }
1302 return NULL;
1303 }
1304
1305 /* Read memory from the live target, even if currently inspecting a
1306 traceframe. The return is the same as that of target_read. */
1307
1308 static LONGEST
1309 target_read_live_memory (enum target_object object,
1310 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1311 {
1312 int ret;
1313 struct cleanup *cleanup;
1314
1315 /* Switch momentarily out of tfind mode so to access live memory.
1316 Note that this must not clear global state, such as the frame
1317 cache, which must still remain valid for the previous traceframe.
1318 We may be _building_ the frame cache at this point. */
1319 cleanup = make_cleanup_restore_traceframe_number ();
1320 set_traceframe_number (-1);
1321
1322 ret = target_read (current_target.beneath, object, NULL,
1323 myaddr, memaddr, len);
1324
1325 do_cleanups (cleanup);
1326 return ret;
1327 }
1328
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  Returns 0 when MEMADDR does not fall inside a
   read-only section.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only consult live memory when MEMADDR lies in a read-only
     section; anything else is "nothing transferred".  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	    }
	}
    }

  return 0;
}
1383
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   Tries, in order: unmapped overlay sections, trusted read-only
   executable sections, traceframe-available memory, the dcache, and
   finally each target on the stack in turn.  */

static LONGEST
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       void *readbuf, const void *writebuf, ULONGEST memaddr,
		       LONGEST len)
{
  LONGEST res;
  /* NOTE(review): REG_LEN is an int while LEN is LONGEST, so the
     region-clamped length is narrowed here -- confirm transfers
     larger than INT_MAX cannot reach this point.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Clamp to the gap before the first available
		     range.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr, len);
	      if (res > 0)
		return res;

	      /* No use trying further, we know some memory starting
		 at MEMADDR isn't available.  */
	      return -1;
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  Clamp the request
     so it does not run past the end of the region.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching any target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	return res;
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1594
1595 /* Perform a partial memory transfer. For docs see target.h,
1596 to_xfer_partial. */
1597
1598 static LONGEST
1599 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1600 void *readbuf, const void *writebuf, ULONGEST memaddr,
1601 LONGEST len)
1602 {
1603 int res;
1604
1605 /* Zero length requests are ok and require no work. */
1606 if (len == 0)
1607 return 0;
1608
1609 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1610 breakpoint insns, thus hiding out from higher layers whether
1611 there are software breakpoints inserted in the code stream. */
1612 if (readbuf != NULL)
1613 {
1614 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1615
1616 if (res > 0 && !show_memory_breakpoints)
1617 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1618 }
1619 else
1620 {
1621 void *buf;
1622 struct cleanup *old_chain;
1623
1624 buf = xmalloc (len);
1625 old_chain = make_cleanup (xfree, buf);
1626 memcpy (buf, writebuf, len);
1627
1628 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1629 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1630
1631 do_cleanups (old_chain);
1632 }
1633
1634 return res;
1635 }
1636
/* Cleanup callback: restore show_memory_breakpoints from the value
   that was smuggled through ARG as an integer-in-pointer.  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1642
1643 struct cleanup *
1644 make_show_memory_breakpoints_cleanup (int show)
1645 {
1646 int current = show_memory_breakpoints;
1647
1648 show_memory_breakpoints = show;
1649 return make_cleanup (restore_show_memory_breakpoints,
1650 (void *) (uintptr_t) current);
1651 }
1652
/* For docs see target.h, to_xfer_partial.  Central dispatch for all
   partial transfers: enforces the "may-write-memory" setting, routes
   memory objects through memory_xfer_partial, and optionally dumps
   the transfer when target debugging is on.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Honor the user's "set may-write-memory off".  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      /* Dump the transferred bytes from whichever buffer was in
	 play.  */
      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      /* Break the dump line at 16-byte boundaries; unless
		 "set debug target" is 2 or more, stop after the
		 first line.  */
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1732
1733 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1734 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1735 if any error occurs.
1736
1737 If an error occurs, no guarantee is made about the contents of the data at
1738 MYADDR. In particular, the caller should not depend upon partial reads
1739 filling the buffer with good data. There is no way for the caller to know
1740 how much good data might have been transfered anyway. Callers that can
1741 deal with partial reads should call target_read (which will retry until
1742 it makes no progress, and then return how much was transferred). */
1743
1744 int
1745 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1746 {
1747 /* Dispatch to the topmost target, not the flattened current_target.
1748 Memory accesses check target->to_has_(all_)memory, and the
1749 flattened target doesn't inherit those. */
1750 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1751 myaddr, memaddr, len) == len)
1752 return 0;
1753 else
1754 return EIO;
1755 }
1756
1757 /* Like target_read_memory, but specify explicitly that this is a read from
1758 the target's stack. This may trigger different cache behavior. */
1759
1760 int
1761 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1762 {
1763 /* Dispatch to the topmost target, not the flattened current_target.
1764 Memory accesses check target->to_has_(all_)memory, and the
1765 flattened target doesn't inherit those. */
1766
1767 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1768 myaddr, memaddr, len) == len)
1769 return 0;
1770 else
1771 return EIO;
1772 }
1773
1774 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1775 Returns either 0 for success or an errno value if any error occurs.
1776 If an error occurs, no guarantee is made about how much data got written.
1777 Callers that can deal with partial writes should call target_write. */
1778
1779 int
1780 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1781 {
1782 /* Dispatch to the topmost target, not the flattened current_target.
1783 Memory accesses check target->to_has_(all_)memory, and the
1784 flattened target doesn't inherit those. */
1785 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1786 myaddr, memaddr, len) == len)
1787 return 0;
1788 else
1789 return EIO;
1790 }
1791
1792 /* Write LEN bytes from MYADDR to target raw memory at address
1793 MEMADDR. Returns either 0 for success or an errno value if any
1794 error occurs. If an error occurs, no guarantee is made about how
1795 much data got written. Callers that can deal with partial writes
1796 should call target_write. */
1797
1798 int
1799 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1800 {
1801 /* Dispatch to the topmost target, not the flattened current_target.
1802 Memory accesses check target->to_has_(all_)memory, and the
1803 flattened target doesn't inherit those. */
1804 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1805 myaddr, memaddr, len) == len)
1806 return 0;
1807 else
1808 return EIO;
1809 }
1810
/* Fetch the target's memory map.  Returns a sorted, numbered vector
   of regions (caller frees), or NULL if no target supplies a map or
   the map is invalid (overlapping regions).  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  /* Delegate to the first target on the stack that implements
     to_memory_map.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  /* Sort by start address so overlap checking and numbering below
     are a single linear pass.  */
  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1858
1859 void
1860 target_flash_erase (ULONGEST address, LONGEST length)
1861 {
1862 struct target_ops *t;
1863
1864 for (t = current_target.beneath; t != NULL; t = t->beneath)
1865 if (t->to_flash_erase != NULL)
1866 {
1867 if (targetdebug)
1868 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1869 hex_string (address), phex (length, 0));
1870 t->to_flash_erase (t, address, length);
1871 return;
1872 }
1873
1874 tcomplain ();
1875 }
1876
1877 void
1878 target_flash_done (void)
1879 {
1880 struct target_ops *t;
1881
1882 for (t = current_target.beneath; t != NULL; t = t->beneath)
1883 if (t->to_flash_done != NULL)
1884 {
1885 if (targetdebug)
1886 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1887 t->to_flash_done (t);
1888 return;
1889 }
1890
1891 tcomplain ();
1892 }
1893
/* "show trust-readonly-sections" callback: print the current setting
   VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1902
/* More generic transfers.  */

/* Default to_xfer_partial implementation: for memory objects, fall
   back to the target's deprecated_xfer_memory method if it has one;
   otherwise delegate to the target beneath.  Returns the bytes
   transferred, 0 for "no more", or -1 on error.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      /* deprecated_xfer_memory distinguishes "end of transfer" from
	 "error" via errno, so clear it first.  */
      errno = 0;
      if (writebuf != NULL)
	{
	  /* Copy to a scratch buffer: the deprecated interface takes
	     a non-const pointer.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
1946
1947 /* The xfer_partial handler for the topmost target. Unlike the default,
1948 it does not need to handle memory specially; it just passes all
1949 requests down the stack. */
1950
1951 static LONGEST
1952 current_xfer_partial (struct target_ops *ops, enum target_object object,
1953 const char *annex, gdb_byte *readbuf,
1954 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1955 {
1956 if (ops->beneath != NULL)
1957 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1958 readbuf, writebuf, offset, len);
1959 else
1960 return -1;
1961 }
1962
/* Target vector read/write partial wrapper functions.  */

/* Read wrapper: a target_xfer_partial with no write buffer.  */

static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
1973
/* Write wrapper: a target_xfer_partial with no read buffer.  */

static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
1982
1983 /* Wrappers to perform the full transfer. */
1984
1985 /* For docs on target_read see target.h. */
1986
1987 LONGEST
1988 target_read (struct target_ops *ops,
1989 enum target_object object,
1990 const char *annex, gdb_byte *buf,
1991 ULONGEST offset, LONGEST len)
1992 {
1993 LONGEST xfered = 0;
1994
1995 while (xfered < len)
1996 {
1997 LONGEST xfer = target_read_partial (ops, object, annex,
1998 (gdb_byte *) buf + xfered,
1999 offset + xfered, len - xfered);
2000
2001 /* Call an observer, notifying them of the xfer progress? */
2002 if (xfer == 0)
2003 return xfered;
2004 if (xfer < 0)
2005 return -1;
2006 xfered += xfer;
2007 QUIT;
2008 }
2009 return len;
2010 }
2011
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning, or end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  An explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  /* Nonzero means the readable part starts at BEGIN; zero means it
     ends at END.  */
  int forward;
  memory_read_result_s r;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1) == 1)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1) == 1)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is always the half adjacent to the part already
	 known readable; "second half" is the other one.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into a
	 right-sized buffer so the result block owns exactly its own
	 bytes.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2144
2145 void
2146 free_memory_read_result_vector (void *x)
2147 {
2148 VEC(memory_read_result_s) *v = x;
2149 memory_read_result_s *current;
2150 int ix;
2151
2152 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2153 {
2154 xfree (current->data);
2155 }
2156 VEC_free (memory_read_result_s, v);
2157 }
2158
2159 VEC(memory_read_result_s) *
2160 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2161 {
2162 VEC(memory_read_result_s) *result = 0;
2163
2164 LONGEST xfered = 0;
2165 while (xfered < len)
2166 {
2167 struct mem_region *region = lookup_mem_region (offset + xfered);
2168 LONGEST rlen;
2169
2170 /* If there is no explicit region, a fake one should be created. */
2171 gdb_assert (region);
2172
2173 if (region->hi == 0)
2174 rlen = len - xfered;
2175 else
2176 rlen = region->hi - offset;
2177
2178 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2179 {
2180 /* Cannot read this region. Note that we can end up here only
2181 if the region is explicitly marked inaccessible, or
2182 'inaccessible-by-default' is in effect. */
2183 xfered += rlen;
2184 }
2185 else
2186 {
2187 LONGEST to_read = min (len - xfered, rlen);
2188 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2189
2190 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2191 (gdb_byte *) buffer,
2192 offset + xfered, to_read);
2193 /* Call an observer, notifying them of the xfer progress? */
2194 if (xfer <= 0)
2195 {
2196 /* Got an error reading full chunk. See if maybe we can read
2197 some subrange. */
2198 xfree (buffer);
2199 read_whatever_is_readable (ops, offset + xfered,
2200 offset + xfered + to_read, &result);
2201 xfered += to_read;
2202 }
2203 else
2204 {
2205 struct memory_read_result r;
2206 r.data = buffer;
2207 r.begin = offset + xfered;
2208 r.end = r.begin + xfer;
2209 VEC_safe_push (memory_read_result_s, result, &r);
2210 xfered += xfer;
2211 }
2212 QUIT;
2213 }
2214 }
2215 return result;
2216 }
2217
2218
2219 /* An alternative to target_write with progress callbacks. */
2220
2221 LONGEST
2222 target_write_with_progress (struct target_ops *ops,
2223 enum target_object object,
2224 const char *annex, const gdb_byte *buf,
2225 ULONGEST offset, LONGEST len,
2226 void (*progress) (ULONGEST, void *), void *baton)
2227 {
2228 LONGEST xfered = 0;
2229
2230 /* Give the progress callback a chance to set up. */
2231 if (progress)
2232 (*progress) (0, baton);
2233
2234 while (xfered < len)
2235 {
2236 LONGEST xfer = target_write_partial (ops, object, annex,
2237 (gdb_byte *) buf + xfered,
2238 offset + xfered, len - xfered);
2239
2240 if (xfer == 0)
2241 return xfered;
2242 if (xfer < 0)
2243 return -1;
2244
2245 if (progress)
2246 (*progress) (xfer, baton);
2247
2248 xfered += xfer;
2249 QUIT;
2250 }
2251 return len;
2252 }
2253
2254 /* For docs on target_write see target.h. */
2255
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* A plain write is just the progress-reporting variant with no
     progress callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2265
2266 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2267 the size of the transferred data. PADDING additional bytes are
2268 available in *BUF_P. This is a helper function for
2269 target_read_alloc; see the declaration of that function for more
2270 information. */
2271
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;	/* Buffer capacity and fill level.  */
  gdb_byte *buf;
  LONGEST n;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* PADDING bytes are kept free at the end of the buffer so a
	 caller (e.g. target_read_stralloc) can append a terminator
	 without reallocating.  */
      n = target_read_partial (ops, object, annex, &buf[buf_pos],
			       buf_pos, buf_alloc - buf_pos - padding);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);	/* Empty object: *BUF_P is left unset.  */
	  else
	    *buf_p = buf;	/* Caller owns BUF and must xfree it.  */
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2324
2325 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2326 the size of the transferred data. See the declaration in "target.h"
2327 function for more information about the return value. */
2328
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding requested: the caller gets exactly the transferred
     bytes, unterminated.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2335
2336 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2337 returned as a string, allocated using xmalloc. If an error occurs
2338 or the transfer is unsupported, NULL is returned. Empty objects
2339 are returned as allocated but empty strings. A warning is issued
2340 if the result contains any embedded NUL bytes. */
2341
2342 char *
2343 target_read_stralloc (struct target_ops *ops, enum target_object object,
2344 const char *annex)
2345 {
2346 gdb_byte *buffer;
2347 LONGEST transferred;
2348
2349 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2350
2351 if (transferred < 0)
2352 return NULL;
2353
2354 if (transferred == 0)
2355 return xstrdup ("");
2356
2357 buffer[transferred] = 0;
2358 if (strlen (buffer) < transferred)
2359 warning (_("target object %d, annex %s, "
2360 "contained unexpected null characters"),
2361 (int) object, annex ? annex : "(none)");
2362
2363 return (char *) buffer;
2364 }
2365
2366 /* Memory transfer methods. */
2367
2368 void
2369 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2370 LONGEST len)
2371 {
2372 /* This method is used to read from an alternate, non-current
2373 target. This read must bypass the overlay support (as symbols
2374 don't match this target), and GDB's internal cache (wrong cache
2375 for this target). */
2376 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2377 != len)
2378 memory_error (EIO, addr);
2379 }
2380
2381 ULONGEST
2382 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2383 int len, enum bfd_endian byte_order)
2384 {
2385 gdb_byte buf[sizeof (ULONGEST)];
2386
2387 gdb_assert (len <= sizeof (buf));
2388 get_target_memory (ops, addr, buf, len);
2389 return extract_unsigned_integer (buf, len, byte_order);
2390 }
2391
2392 int
2393 target_insert_breakpoint (struct gdbarch *gdbarch,
2394 struct bp_target_info *bp_tgt)
2395 {
2396 if (!may_insert_breakpoints)
2397 {
2398 warning (_("May not insert breakpoints"));
2399 return 1;
2400 }
2401
2402 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2403 }
2404
2405 int
2406 target_remove_breakpoint (struct gdbarch *gdbarch,
2407 struct bp_target_info *bp_tgt)
2408 {
2409 /* This is kind of a weird case to handle, but the permission might
2410 have been changed after breakpoints were inserted - in which case
2411 we should just take the user literally and assume that any
2412 breakpoints should be left in place. */
2413 if (!may_insert_breakpoints)
2414 {
2415 warning (_("May not remove breakpoints"));
2416 return 1;
2417 }
2418
2419 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2420 }
2421
2422 static void
2423 target_info (char *args, int from_tty)
2424 {
2425 struct target_ops *t;
2426 int has_all_mem = 0;
2427
2428 if (symfile_objfile != NULL)
2429 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2430
2431 for (t = target_stack; t != NULL; t = t->beneath)
2432 {
2433 if (!(*t->to_has_memory) (t))
2434 continue;
2435
2436 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2437 continue;
2438 if (has_all_mem)
2439 printf_unfiltered (_("\tWhile running this, "
2440 "GDB does not access memory from...\n"));
2441 printf_unfiltered ("%s:\n", t->to_longname);
2442 (t->to_files_info) (t);
2443 has_all_mem = (*t->to_has_all_memory) (t);
2444 }
2445 }
2446
2447 /* This function is called before any new inferior is created, e.g.
2448 by running a program, attaching, or connecting to a target.
2449 It cleans up any state from previous invocations which might
2450 change between runs. This is a subset of what target_preopen
2451 resets (things which might change between targets). */
2452
void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of the state
     below may be discarded, so do nothing.  */
  if (!gdbarch_has_global_solist (target_gdbarch))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }
}
2485
2486 /* Callback for iterate_over_inferiors. Gets rid of the given
2487 inferior. */
2488
2489 static int
2490 dispose_inferior (struct inferior *inf, void *args)
2491 {
2492 struct thread_info *thread;
2493
2494 thread = any_thread_of_process (inf->pid);
2495 if (thread)
2496 {
2497 switch_to_thread (thread->ptid);
2498
2499 /* Core inferiors actually should be detached, not killed. */
2500 if (target_has_execution)
2501 target_kill ();
2502 else
2503 target_detach (NULL, 0);
2504 }
2505
2506 return 0;
2507 }
2508
2509 /* This is to be called by the open routine before it does
2510 anything. */
2511
2512 void
2513 target_preopen (int from_tty)
2514 {
2515 dont_repeat ();
2516
2517 if (have_inferiors ())
2518 {
2519 if (!from_tty
2520 || !have_live_inferiors ()
2521 || query (_("A program is being debugged already. Kill it? ")))
2522 iterate_over_inferiors (dispose_inferior, NULL);
2523 else
2524 error (_("Program not killed."));
2525 }
2526
2527 /* Calling target_kill may remove the target from the stack. But if
2528 it doesn't (which seems like a win for UDI), remove it now. */
2529 /* Leave the exec target, though. The user may be switching from a
2530 live process to a core of the same program. */
2531 pop_all_targets_above (file_stratum, 0);
2532
2533 target_pre_inferior (from_tty);
2534 }
2535
2536 /* Detach a target after doing deferred register stores. */
2537
2538 void
2539 target_detach (char *args, int from_tty)
2540 {
2541 struct target_ops* t;
2542
2543 if (gdbarch_has_global_breakpoints (target_gdbarch))
2544 /* Don't remove global breakpoints here. They're removed on
2545 disconnection from the target. */
2546 ;
2547 else
2548 /* If we're in breakpoints-always-inserted mode, have to remove
2549 them before detaching. */
2550 remove_breakpoints_pid (PIDGET (inferior_ptid));
2551
2552 prepare_for_detach ();
2553
2554 for (t = current_target.beneath; t != NULL; t = t->beneath)
2555 {
2556 if (t->to_detach != NULL)
2557 {
2558 t->to_detach (t, args, from_tty);
2559 if (targetdebug)
2560 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2561 args, from_tty);
2562 return;
2563 }
2564 }
2565
2566 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2567 }
2568
2569 void
2570 target_disconnect (char *args, int from_tty)
2571 {
2572 struct target_ops *t;
2573
2574 /* If we're in breakpoints-always-inserted mode or if breakpoints
2575 are global across processes, we have to remove them before
2576 disconnecting. */
2577 remove_breakpoints ();
2578
2579 for (t = current_target.beneath; t != NULL; t = t->beneath)
2580 if (t->to_disconnect != NULL)
2581 {
2582 if (targetdebug)
2583 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2584 args, from_tty);
2585 t->to_disconnect (t, args, from_tty);
2586 return;
2587 }
2588
2589 tcomplain ();
2590 }
2591
2592 ptid_t
2593 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2594 {
2595 struct target_ops *t;
2596
2597 for (t = current_target.beneath; t != NULL; t = t->beneath)
2598 {
2599 if (t->to_wait != NULL)
2600 {
2601 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2602
2603 if (targetdebug)
2604 {
2605 char *status_string;
2606
2607 status_string = target_waitstatus_to_string (status);
2608 fprintf_unfiltered (gdb_stdlog,
2609 "target_wait (%d, status) = %d, %s\n",
2610 PIDGET (ptid), PIDGET (retval),
2611 status_string);
2612 xfree (status_string);
2613 }
2614
2615 return retval;
2616 }
2617 }
2618
2619 noprocess ();
2620 }
2621
2622 char *
2623 target_pid_to_str (ptid_t ptid)
2624 {
2625 struct target_ops *t;
2626
2627 for (t = current_target.beneath; t != NULL; t = t->beneath)
2628 {
2629 if (t->to_pid_to_str != NULL)
2630 return (*t->to_pid_to_str) (t, ptid);
2631 }
2632
2633 return normal_pid_to_str (ptid);
2634 }
2635
2636 char *
2637 target_thread_name (struct thread_info *info)
2638 {
2639 struct target_ops *t;
2640
2641 for (t = current_target.beneath; t != NULL; t = t->beneath)
2642 {
2643 if (t->to_thread_name != NULL)
2644 return (*t->to_thread_name) (info);
2645 }
2646
2647 return NULL;
2648 }
2649
2650 void
2651 target_resume (ptid_t ptid, int step, enum target_signal signal)
2652 {
2653 struct target_ops *t;
2654
2655 target_dcache_invalidate ();
2656
2657 for (t = current_target.beneath; t != NULL; t = t->beneath)
2658 {
2659 if (t->to_resume != NULL)
2660 {
2661 t->to_resume (t, ptid, step, signal);
2662 if (targetdebug)
2663 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2664 PIDGET (ptid),
2665 step ? "step" : "continue",
2666 target_signal_to_name (signal));
2667
2668 registers_changed_ptid (ptid);
2669 set_executing (ptid, 1);
2670 set_running (ptid, 1);
2671 clear_inline_frame_state (ptid);
2672 return;
2673 }
2674 }
2675
2676 noprocess ();
2677 }
2678
2679 void
2680 target_pass_signals (int numsigs, unsigned char *pass_signals)
2681 {
2682 struct target_ops *t;
2683
2684 for (t = current_target.beneath; t != NULL; t = t->beneath)
2685 {
2686 if (t->to_pass_signals != NULL)
2687 {
2688 if (targetdebug)
2689 {
2690 int i;
2691
2692 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2693 numsigs);
2694
2695 for (i = 0; i < numsigs; i++)
2696 if (pass_signals[i])
2697 fprintf_unfiltered (gdb_stdlog, " %s",
2698 target_signal_to_name (i));
2699
2700 fprintf_unfiltered (gdb_stdlog, " })\n");
2701 }
2702
2703 (*t->to_pass_signals) (numsigs, pass_signals);
2704 return;
2705 }
2706 }
2707 }
2708
2709 /* Look through the list of possible targets for a target that can
2710 follow forks. */
2711
2712 int
2713 target_follow_fork (int follow_child)
2714 {
2715 struct target_ops *t;
2716
2717 for (t = current_target.beneath; t != NULL; t = t->beneath)
2718 {
2719 if (t->to_follow_fork != NULL)
2720 {
2721 int retval = t->to_follow_fork (t, follow_child);
2722
2723 if (targetdebug)
2724 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2725 follow_child, retval);
2726 return retval;
2727 }
2728 }
2729
2730 /* Some target returned a fork event, but did not know how to follow it. */
2731 internal_error (__FILE__, __LINE__,
2732 _("could not find a target to follow fork"));
2733 }
2734
2735 void
2736 target_mourn_inferior (void)
2737 {
2738 struct target_ops *t;
2739
2740 for (t = current_target.beneath; t != NULL; t = t->beneath)
2741 {
2742 if (t->to_mourn_inferior != NULL)
2743 {
2744 t->to_mourn_inferior (t);
2745 if (targetdebug)
2746 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2747
2748 /* We no longer need to keep handles on any of the object files.
2749 Make sure to release them to avoid unnecessarily locking any
2750 of them while we're not actually debugging. */
2751 bfd_cache_close_all ();
2752
2753 return;
2754 }
2755 }
2756
2757 internal_error (__FILE__, __LINE__,
2758 _("could not find a target to follow mourn inferior"));
2759 }
2760
2761 /* Look for a target which can describe architectural features, starting
2762 from TARGET. If we find one, return its description. */
2763
2764 const struct target_desc *
2765 target_read_description (struct target_ops *target)
2766 {
2767 struct target_ops *t;
2768
2769 for (t = target; t != NULL; t = t->beneath)
2770 if (t->to_read_description != NULL)
2771 {
2772 const struct target_desc *tdesc;
2773
2774 tdesc = t->to_read_description (t);
2775 if (tdesc)
2776 return tdesc;
2777 }
2778
2779 return NULL;
2780 }
2781
2782 /* The default implementation of to_search_memory.
2783 This implements a basic search of memory, reading target memory and
2784 performing the search here (as opposed to performing the search in on the
2785 target side with, for example, gdbserver). */
2786
int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  It is sized one
     chunk plus PATTERN_LEN - 1 overlap bytes so that a match
     straddling two chunks is still found.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back to a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target "
			 "memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2887
2888 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2889 sequence of bytes in PATTERN with length PATTERN_LEN.
2890
2891 The result is 1 if found, 0 if not found, and -1 if there was an error
2892 requiring halting of the search (e.g. memory read error).
2893 If the pattern is found the address is recorded in FOUND_ADDRP. */
2894
2895 int
2896 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2897 const gdb_byte *pattern, ULONGEST pattern_len,
2898 CORE_ADDR *found_addrp)
2899 {
2900 struct target_ops *t;
2901 int found;
2902
2903 /* We don't use INHERIT to set current_target.to_search_memory,
2904 so we have to scan the target stack and handle targetdebug
2905 ourselves. */
2906
2907 if (targetdebug)
2908 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2909 hex_string (start_addr));
2910
2911 for (t = current_target.beneath; t != NULL; t = t->beneath)
2912 if (t->to_search_memory != NULL)
2913 break;
2914
2915 if (t != NULL)
2916 {
2917 found = t->to_search_memory (t, start_addr, search_space_len,
2918 pattern, pattern_len, found_addrp);
2919 }
2920 else
2921 {
2922 /* If a special version of to_search_memory isn't available, use the
2923 simple version. */
2924 found = simple_search_memory (current_target.beneath,
2925 start_addr, search_space_len,
2926 pattern, pattern_len, found_addrp);
2927 }
2928
2929 if (targetdebug)
2930 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2931
2932 return found;
2933 }
2934
2935 /* Look through the currently pushed targets. If none of them will
2936 be able to restart the currently running process, issue an error
2937 message. */
2938
2939 void
2940 target_require_runnable (void)
2941 {
2942 struct target_ops *t;
2943
2944 for (t = target_stack; t != NULL; t = t->beneath)
2945 {
2946 /* If this target knows how to create a new program, then
2947 assume we will still be able to after killing the current
2948 one. Either killing and mourning will not pop T, or else
2949 find_default_run_target will find it again. */
2950 if (t->to_create_inferior != NULL)
2951 return;
2952
2953 /* Do not worry about thread_stratum targets that can not
2954 create inferiors. Assume they will be pushed again if
2955 necessary, and continue to the process_stratum. */
2956 if (t->to_stratum == thread_stratum
2957 || t->to_stratum == arch_stratum)
2958 continue;
2959
2960 error (_("The \"%s\" target does not support \"run\". "
2961 "Try \"help target\" or \"continue\"."),
2962 t->to_shortname);
2963 }
2964
2965 /* This function is only called if the target is running. In that
2966 case there should have been a process_stratum target and it
2967 should either know how to create inferiors, or not... */
2968 internal_error (__FILE__, __LINE__, _("No targets found"));
2969 }
2970
2971 /* Look through the list of possible targets for a target that can
2972 execute a run or attach command without any other data. This is
2973 used to locate the default process stratum.
2974
2975 If DO_MESG is not NULL, the result is always valid (error() is
2976 called for errors); else, return NULL on error. */
2977
2978 static struct target_ops *
2979 find_default_run_target (char *do_mesg)
2980 {
2981 struct target_ops **t;
2982 struct target_ops *runable = NULL;
2983 int count;
2984
2985 count = 0;
2986
2987 for (t = target_structs; t < target_structs + target_struct_size;
2988 ++t)
2989 {
2990 if ((*t)->to_can_run && target_can_run (*t))
2991 {
2992 runable = *t;
2993 ++count;
2994 }
2995 }
2996
2997 if (count != 1)
2998 {
2999 if (do_mesg)
3000 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3001 else
3002 return NULL;
3003 }
3004
3005 return runable;
3006 }
3007
3008 void
3009 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3010 {
3011 struct target_ops *t;
3012
3013 t = find_default_run_target ("attach");
3014 (t->to_attach) (t, args, from_tty);
3015 return;
3016 }
3017
3018 void
3019 find_default_create_inferior (struct target_ops *ops,
3020 char *exec_file, char *allargs, char **env,
3021 int from_tty)
3022 {
3023 struct target_ops *t;
3024
3025 t = find_default_run_target ("run");
3026 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3027 return;
3028 }
3029
3030 static int
3031 find_default_can_async_p (void)
3032 {
3033 struct target_ops *t;
3034
3035 /* This may be called before the target is pushed on the stack;
3036 look for the default process stratum. If there's none, gdb isn't
3037 configured with a native debugger, and target remote isn't
3038 connected yet. */
3039 t = find_default_run_target (NULL);
3040 if (t && t->to_can_async_p)
3041 return (t->to_can_async_p) ();
3042 return 0;
3043 }
3044
3045 static int
3046 find_default_is_async_p (void)
3047 {
3048 struct target_ops *t;
3049
3050 /* This may be called before the target is pushed on the stack;
3051 look for the default process stratum. If there's none, gdb isn't
3052 configured with a native debugger, and target remote isn't
3053 connected yet. */
3054 t = find_default_run_target (NULL);
3055 if (t && t->to_is_async_p)
3056 return (t->to_is_async_p) ();
3057 return 0;
3058 }
3059
3060 static int
3061 find_default_supports_non_stop (void)
3062 {
3063 struct target_ops *t;
3064
3065 t = find_default_run_target (NULL);
3066 if (t && t->to_supports_non_stop)
3067 return (t->to_supports_non_stop) ();
3068 return 0;
3069 }
3070
3071 int
3072 target_supports_non_stop (void)
3073 {
3074 struct target_ops *t;
3075
3076 for (t = &current_target; t != NULL; t = t->beneath)
3077 if (t->to_supports_non_stop)
3078 return t->to_supports_non_stop ();
3079
3080 return 0;
3081 }
3082
3083 static int
3084 find_default_supports_disable_randomization (void)
3085 {
3086 struct target_ops *t;
3087
3088 t = find_default_run_target (NULL);
3089 if (t && t->to_supports_disable_randomization)
3090 return (t->to_supports_disable_randomization) ();
3091 return 0;
3092 }
3093
3094 int
3095 target_supports_disable_randomization (void)
3096 {
3097 struct target_ops *t;
3098
3099 for (t = &current_target; t != NULL; t = t->beneath)
3100 if (t->to_supports_disable_randomization)
3101 return t->to_supports_disable_randomization ();
3102
3103 return 0;
3104 }
3105
3106 char *
3107 target_get_osdata (const char *type)
3108 {
3109 struct target_ops *t;
3110
3111 /* If we're already connected to something that can get us OS
3112 related data, use it. Otherwise, try using the native
3113 target. */
3114 if (current_target.to_stratum >= process_stratum)
3115 t = current_target.beneath;
3116 else
3117 t = find_default_run_target ("get OS data");
3118
3119 if (!t)
3120 return NULL;
3121
3122 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3123 }
3124
3125 /* Determine the current address space of thread PTID. */
3126
3127 struct address_space *
3128 target_thread_address_space (ptid_t ptid)
3129 {
3130 struct address_space *aspace;
3131 struct inferior *inf;
3132 struct target_ops *t;
3133
3134 for (t = current_target.beneath; t != NULL; t = t->beneath)
3135 {
3136 if (t->to_thread_address_space != NULL)
3137 {
3138 aspace = t->to_thread_address_space (t, ptid);
3139 gdb_assert (aspace);
3140
3141 if (targetdebug)
3142 fprintf_unfiltered (gdb_stdlog,
3143 "target_thread_address_space (%s) = %d\n",
3144 target_pid_to_str (ptid),
3145 address_space_num (aspace));
3146 return aspace;
3147 }
3148 }
3149
3150 /* Fall-back to the "main" address space of the inferior. */
3151 inf = find_inferior_pid (ptid_get_pid (ptid));
3152
3153 if (inf == NULL || inf->aspace == NULL)
3154 internal_error (__FILE__, __LINE__,
3155 _("Can't determine the current "
3156 "address space of thread %s\n"),
3157 target_pid_to_str (ptid));
3158
3159 return inf->aspace;
3160 }
3161
static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  /* Fallback heuristic: accept regions no wider than one pointer on
     the current architecture.  ADDR itself is not examined, so
     alignment is ignored here.  */
  return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
}
3167
3168 static int
3169 default_watchpoint_addr_within_range (struct target_ops *target,
3170 CORE_ADDR addr,
3171 CORE_ADDR start, int length)
3172 {
3173 return addr >= start && addr < start + length;
3174 }
3175
static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  /* By default, every thread is assumed to use the inferior's global
     architecture; PTID is ignored.  */
  return target_gdbarch;
}
3181
static int
return_zero (void)
{
  /* Placeholder target-vector method: unconditionally answer 0.  */
  return 0;
}
3187
static int
return_one (void)
{
  /* Placeholder target-vector method: unconditionally answer 1.  */
  return 1;
}
3193
static int
return_minus_one (void)
{
  /* Placeholder target-vector method: unconditionally answer -1.  */
  return -1;
}
3199
3200 /* Find a single runnable target in the stack and return it. If for
3201 some reason there is more than one, return NULL. */
3202
3203 struct target_ops *
3204 find_run_target (void)
3205 {
3206 struct target_ops **t;
3207 struct target_ops *runable = NULL;
3208 int count;
3209
3210 count = 0;
3211
3212 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3213 {
3214 if ((*t)->to_can_run && target_can_run (*t))
3215 {
3216 runable = *t;
3217 ++count;
3218 }
3219 }
3220
3221 return (count == 1 ? runable : NULL);
3222 }
3223
3224 /*
3225 * Find the next target down the stack from the specified target.
3226 */
3227
struct target_ops *
find_target_beneath (struct target_ops *t)
{
  /* Simply the next entry in the singly-linked target stack.  */
  return t->beneath;
}
3233
3234 \f
3235 /* The inferior process has died. Long live the inferior! */
3236
3237 void
3238 generic_mourn_inferior (void)
3239 {
3240 ptid_t ptid;
3241
3242 ptid = inferior_ptid;
3243 inferior_ptid = null_ptid;
3244
3245 if (!ptid_equal (ptid, null_ptid))
3246 {
3247 int pid = ptid_get_pid (ptid);
3248 exit_inferior (pid);
3249 }
3250
3251 breakpoint_init_inferior (inf_exited);
3252 registers_changed ();
3253
3254 reopen_exec_file ();
3255 reinit_frame_cache ();
3256
3257 if (deprecated_detach_hook)
3258 deprecated_detach_hook ();
3259 }
3260 \f
3261 /* Helper function for child_wait and the derivatives of child_wait.
3262 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3263 translation of that in OURSTATUS. */
3264 void
3265 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3266 {
3267 if (WIFEXITED (hoststatus))
3268 {
3269 ourstatus->kind = TARGET_WAITKIND_EXITED;
3270 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3271 }
3272 else if (!WIFSTOPPED (hoststatus))
3273 {
3274 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3275 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3276 }
3277 else
3278 {
3279 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3280 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3281 }
3282 }
3283 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  The result is only valid until the next call; this
   function is not reentrant.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3295
/* Default to_pid_to_str implementation for the dummy target: OPS is
   ignored and PTID is formatted as a plain process ID.  */
static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3301
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () throws.  The return only placates the
     compiler.  */
  return 0;
}
3309
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached; error () throws.  The return only placates the
     compiler.  */
  return NULL;
}
3317
/* Error-catcher for target_get_bookmark.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  /* Not reached; tcomplain is ATTRIBUTE_NORETURN.  */
  return NULL;
}
3325
/* Error-catcher for target_goto_bookmark.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3332
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at the bottom of the target stack
   and answers "no"/"not implemented" for everything it does not
   explicitly delegate to the find_default_* routines.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* The dummy target has nothing: no memory, no stack, no registers,
     no execution.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3369 \f
/* Debug wrapper: forward to the saved to_open method, then log the
   call to gdb_stdlog.  */
static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3377
/* Close the target TARG.  Prefer the extended to_xclose hook (which
   receives the target itself) over plain to_close; a target may
   provide either, or neither.  */
void
target_close (struct target_ops *targ, int quitting)
{
  if (targ->to_xclose != NULL)
    targ->to_xclose (targ, quitting);
  else if (targ->to_close != NULL)
    targ->to_close (quitting);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
}
3389
3390 void
3391 target_attach (char *args, int from_tty)
3392 {
3393 struct target_ops *t;
3394
3395 for (t = current_target.beneath; t != NULL; t = t->beneath)
3396 {
3397 if (t->to_attach != NULL)
3398 {
3399 t->to_attach (t, args, from_tty);
3400 if (targetdebug)
3401 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3402 args, from_tty);
3403 return;
3404 }
3405 }
3406
3407 internal_error (__FILE__, __LINE__,
3408 _("could not find a target to attach"));
3409 }
3410
3411 int
3412 target_thread_alive (ptid_t ptid)
3413 {
3414 struct target_ops *t;
3415
3416 for (t = current_target.beneath; t != NULL; t = t->beneath)
3417 {
3418 if (t->to_thread_alive != NULL)
3419 {
3420 int retval;
3421
3422 retval = t->to_thread_alive (t, ptid);
3423 if (targetdebug)
3424 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3425 PIDGET (ptid), retval);
3426
3427 return retval;
3428 }
3429 }
3430
3431 return 0;
3432 }
3433
3434 void
3435 target_find_new_threads (void)
3436 {
3437 struct target_ops *t;
3438
3439 for (t = current_target.beneath; t != NULL; t = t->beneath)
3440 {
3441 if (t->to_find_new_threads != NULL)
3442 {
3443 t->to_find_new_threads (t);
3444 if (targetdebug)
3445 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3446
3447 return;
3448 }
3449 }
3450 }
3451
3452 void
3453 target_stop (ptid_t ptid)
3454 {
3455 if (!may_stop)
3456 {
3457 warning (_("May not interrupt or stop the target, ignoring attempt"));
3458 return;
3459 }
3460
3461 (*current_target.to_stop) (ptid);
3462 }
3463
/* Debug wrapper: forward to the saved to_post_attach method, then log
   the call to gdb_stdlog.  */
static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3471
/* Return a pretty printed form of target_waitstatus.
   Space for the result is malloc'd, caller must free.  */

char *
target_waitstatus_to_string (const struct target_waitstatus *ws)
{
  const char *kind_str = "status->kind = ";

  switch (ws->kind)
    {
    case TARGET_WAITKIND_EXITED:
      return xstrprintf ("%sexited, status = %d",
			 kind_str, ws->value.integer);
    case TARGET_WAITKIND_STOPPED:
      return xstrprintf ("%sstopped, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_SIGNALLED:
      return xstrprintf ("%ssignalled, signal = %s",
			 kind_str, target_signal_to_name (ws->value.sig));
    case TARGET_WAITKIND_LOADED:
      return xstrprintf ("%sloaded", kind_str);
    case TARGET_WAITKIND_FORKED:
      return xstrprintf ("%sforked", kind_str);
    case TARGET_WAITKIND_VFORKED:
      return xstrprintf ("%svforked", kind_str);
    case TARGET_WAITKIND_EXECD:
      return xstrprintf ("%sexecd", kind_str);
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      return xstrprintf ("%sentered syscall", kind_str);
    case TARGET_WAITKIND_SYSCALL_RETURN:
      return xstrprintf ("%sexited syscall", kind_str);
    case TARGET_WAITKIND_SPURIOUS:
      return xstrprintf ("%sspurious", kind_str);
    case TARGET_WAITKIND_IGNORE:
      /* Note: "%signore" is "%s" followed by "ignore", not a %i
	 conversion.  */
      return xstrprintf ("%signore", kind_str);
    case TARGET_WAITKIND_NO_HISTORY:
      return xstrprintf ("%sno-history", kind_str);
    case TARGET_WAITKIND_NO_RESUMED:
      return xstrprintf ("%sno-resumed", kind_str);
    default:
      return xstrprintf ("%sunknown???", kind_str);
    }
}
3515
/* Log a register access to gdb_stdlog: FUNC is the calling function's
   name, REGNO the register number, and the register's raw bytes are
   read from REGCACHE.  */
static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Print the register name when REGNO maps to a named raw register,
     otherwise fall back to the raw number.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      /* Dump the raw bytes in storage order.  */
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* When the value fits in a LONGEST, also print it as a hex
	 address-style number and in decimal.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3552
3553 void
3554 target_fetch_registers (struct regcache *regcache, int regno)
3555 {
3556 struct target_ops *t;
3557
3558 for (t = current_target.beneath; t != NULL; t = t->beneath)
3559 {
3560 if (t->to_fetch_registers != NULL)
3561 {
3562 t->to_fetch_registers (t, regcache, regno);
3563 if (targetdebug)
3564 debug_print_register ("target_fetch_registers", regcache, regno);
3565 return;
3566 }
3567 }
3568 }
3569
3570 void
3571 target_store_registers (struct regcache *regcache, int regno)
3572 {
3573 struct target_ops *t;
3574
3575 if (!may_write_registers)
3576 error (_("Writing to registers is not allowed (regno %d)"), regno);
3577
3578 for (t = current_target.beneath; t != NULL; t = t->beneath)
3579 {
3580 if (t->to_store_registers != NULL)
3581 {
3582 t->to_store_registers (t, regcache, regno);
3583 if (targetdebug)
3584 {
3585 debug_print_register ("target_store_registers", regcache, regno);
3586 }
3587 return;
3588 }
3589 }
3590
3591 noprocess ();
3592 }
3593
3594 int
3595 target_core_of_thread (ptid_t ptid)
3596 {
3597 struct target_ops *t;
3598
3599 for (t = current_target.beneath; t != NULL; t = t->beneath)
3600 {
3601 if (t->to_core_of_thread != NULL)
3602 {
3603 int retval = t->to_core_of_thread (t, ptid);
3604
3605 if (targetdebug)
3606 fprintf_unfiltered (gdb_stdlog,
3607 "target_core_of_thread (%d) = %d\n",
3608 PIDGET (ptid), retval);
3609 return retval;
3610 }
3611 }
3612
3613 return -1;
3614 }
3615
3616 int
3617 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3618 {
3619 struct target_ops *t;
3620
3621 for (t = current_target.beneath; t != NULL; t = t->beneath)
3622 {
3623 if (t->to_verify_memory != NULL)
3624 {
3625 int retval = t->to_verify_memory (t, data, memaddr, size);
3626
3627 if (targetdebug)
3628 fprintf_unfiltered (gdb_stdlog,
3629 "target_verify_memory (%s, %s) = %d\n",
3630 paddress (target_gdbarch, memaddr),
3631 pulongest (size),
3632 retval);
3633 return retval;
3634 }
3635 }
3636
3637 tcomplain ();
3638 }
3639
3640 /* The documentation for this function is in its prototype declaration in
3641 target.h. */
3642
3643 int
3644 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3645 {
3646 struct target_ops *t;
3647
3648 for (t = current_target.beneath; t != NULL; t = t->beneath)
3649 if (t->to_insert_mask_watchpoint != NULL)
3650 {
3651 int ret;
3652
3653 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
3654
3655 if (targetdebug)
3656 fprintf_unfiltered (gdb_stdlog, "\
3657 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3658 core_addr_to_string (addr),
3659 core_addr_to_string (mask), rw, ret);
3660
3661 return ret;
3662 }
3663
3664 return 1;
3665 }
3666
3667 /* The documentation for this function is in its prototype declaration in
3668 target.h. */
3669
3670 int
3671 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3672 {
3673 struct target_ops *t;
3674
3675 for (t = current_target.beneath; t != NULL; t = t->beneath)
3676 if (t->to_remove_mask_watchpoint != NULL)
3677 {
3678 int ret;
3679
3680 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
3681
3682 if (targetdebug)
3683 fprintf_unfiltered (gdb_stdlog, "\
3684 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3685 core_addr_to_string (addr),
3686 core_addr_to_string (mask), rw, ret);
3687
3688 return ret;
3689 }
3690
3691 return 1;
3692 }
3693
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target on the stack implements
   to_masked_watch_num_registers.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_masked_watch_num_registers != NULL)
      return t->to_masked_watch_num_registers (t, addr, mask);

  return -1;
}
3708
/* The documentation for this function is in its prototype declaration
   in target.h.  Returns -1 when no target on the stack implements
   to_ranged_break_num_registers.  */

int
target_ranged_break_num_registers (void)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_ranged_break_num_registers != NULL)
      return t->to_ranged_break_num_registers (t);

  return -1;
}
3723
/* Debug wrapper: forward to the saved to_prepare_to_store method,
   then log the call to gdb_stdlog.  */
static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3731
/* Debug wrapper around deprecated_xfer_memory: delegate to the saved
   method, then log the request and (subject to verbosity) a hex dump
   of the bytes transferred.  */
static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Start a new output line whenever the *host* buffer
	     address is 16-byte aligned (note: alignment of MYADDR,
	     not of MEMADDR).  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* At verbosity below 2, elide everything after the
		 first line of the dump.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3772
/* Debug wrapper: forward to the saved to_files_info method, then log
   the call to gdb_stdlog.  */
static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3780
3781 static int
3782 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3783 struct bp_target_info *bp_tgt)
3784 {
3785 int retval;
3786
3787 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3788
3789 fprintf_unfiltered (gdb_stdlog,
3790 "target_insert_breakpoint (%s, xxx) = %ld\n",
3791 core_addr_to_string (bp_tgt->placed_address),
3792 (unsigned long) retval);
3793 return retval;
3794 }
3795
3796 static int
3797 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3798 struct bp_target_info *bp_tgt)
3799 {
3800 int retval;
3801
3802 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3803
3804 fprintf_unfiltered (gdb_stdlog,
3805 "target_remove_breakpoint (%s, xxx) = %ld\n",
3806 core_addr_to_string (bp_tgt->placed_address),
3807 (unsigned long) retval);
3808 return retval;
3809 }
3810
3811 static int
3812 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
3813 {
3814 int retval;
3815
3816 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
3817
3818 fprintf_unfiltered (gdb_stdlog,
3819 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3820 (unsigned long) type,
3821 (unsigned long) cnt,
3822 (unsigned long) from_tty,
3823 (unsigned long) retval);
3824 return retval;
3825 }
3826
3827 static int
3828 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3829 {
3830 CORE_ADDR retval;
3831
3832 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3833
3834 fprintf_unfiltered (gdb_stdlog,
3835 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3836 core_addr_to_string (addr), (unsigned long) len,
3837 core_addr_to_string (retval));
3838 return retval;
3839 }
3840
3841 static int
3842 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
3843 struct expression *cond)
3844 {
3845 int retval;
3846
3847 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
3848 rw, cond);
3849
3850 fprintf_unfiltered (gdb_stdlog,
3851 "target_can_accel_watchpoint_condition "
3852 "(%s, %d, %d, %s) = %ld\n",
3853 core_addr_to_string (addr), len, rw,
3854 host_address_to_string (cond), (unsigned long) retval);
3855 return retval;
3856 }
3857
3858 static int
3859 debug_to_stopped_by_watchpoint (void)
3860 {
3861 int retval;
3862
3863 retval = debug_target.to_stopped_by_watchpoint ();
3864
3865 fprintf_unfiltered (gdb_stdlog,
3866 "target_stopped_by_watchpoint () = %ld\n",
3867 (unsigned long) retval);
3868 return retval;
3869 }
3870
3871 static int
3872 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
3873 {
3874 int retval;
3875
3876 retval = debug_target.to_stopped_data_address (target, addr);
3877
3878 fprintf_unfiltered (gdb_stdlog,
3879 "target_stopped_data_address ([%s]) = %ld\n",
3880 core_addr_to_string (*addr),
3881 (unsigned long)retval);
3882 return retval;
3883 }
3884
3885 static int
3886 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3887 CORE_ADDR addr,
3888 CORE_ADDR start, int length)
3889 {
3890 int retval;
3891
3892 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3893 start, length);
3894
3895 fprintf_filtered (gdb_stdlog,
3896 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3897 core_addr_to_string (addr), core_addr_to_string (start),
3898 length, retval);
3899 return retval;
3900 }
3901
3902 static int
3903 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3904 struct bp_target_info *bp_tgt)
3905 {
3906 int retval;
3907
3908 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3909
3910 fprintf_unfiltered (gdb_stdlog,
3911 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3912 core_addr_to_string (bp_tgt->placed_address),
3913 (unsigned long) retval);
3914 return retval;
3915 }
3916
3917 static int
3918 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3919 struct bp_target_info *bp_tgt)
3920 {
3921 int retval;
3922
3923 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3924
3925 fprintf_unfiltered (gdb_stdlog,
3926 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3927 core_addr_to_string (bp_tgt->placed_address),
3928 (unsigned long) retval);
3929 return retval;
3930 }
3931
3932 static int
3933 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
3934 struct expression *cond)
3935 {
3936 int retval;
3937
3938 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
3939
3940 fprintf_unfiltered (gdb_stdlog,
3941 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3942 core_addr_to_string (addr), len, type,
3943 host_address_to_string (cond), (unsigned long) retval);
3944 return retval;
3945 }
3946
3947 static int
3948 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
3949 struct expression *cond)
3950 {
3951 int retval;
3952
3953 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
3954
3955 fprintf_unfiltered (gdb_stdlog,
3956 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3957 core_addr_to_string (addr), len, type,
3958 host_address_to_string (cond), (unsigned long) retval);
3959 return retval;
3960 }
3961
/* Debug wrapper: forward to the saved to_terminal_init method, then
   log the call to gdb_stdlog.  */
static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
3969
/* Debug wrapper: forward to the saved to_terminal_inferior method,
   then log the call to gdb_stdlog.  */
static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
3977
/* Debug wrapper: forward to the saved to_terminal_ours_for_output
   method, then log the call to gdb_stdlog.  */
static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
3985
/* Debug wrapper: forward to the saved to_terminal_ours method, then
   log the call to gdb_stdlog.  */
static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
3993
/* Debug wrapper: forward to the saved to_terminal_save_ours method,
   then log the call to gdb_stdlog.  */
static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
4001
/* Debug wrapper: forward to the saved to_terminal_info method, then
   log the call to gdb_stdlog.  */
static void
debug_to_terminal_info (char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
4010
/* Debug wrapper: forward to the saved to_load method, then log the
   call to gdb_stdlog.  */
static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
4018
/* Debug wrapper: forward to the saved to_post_startup_inferior
   method, then log the call to gdb_stdlog.  */
static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      PIDGET (ptid));
}
4027
4028 static int
4029 debug_to_insert_fork_catchpoint (int pid)
4030 {
4031 int retval;
4032
4033 retval = debug_target.to_insert_fork_catchpoint (pid);
4034
4035 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4036 pid, retval);
4037
4038 return retval;
4039 }
4040
4041 static int
4042 debug_to_remove_fork_catchpoint (int pid)
4043 {
4044 int retval;
4045
4046 retval = debug_target.to_remove_fork_catchpoint (pid);
4047
4048 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4049 pid, retval);
4050
4051 return retval;
4052 }
4053
4054 static int
4055 debug_to_insert_vfork_catchpoint (int pid)
4056 {
4057 int retval;
4058
4059 retval = debug_target.to_insert_vfork_catchpoint (pid);
4060
4061 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4062 pid, retval);
4063
4064 return retval;
4065 }
4066
4067 static int
4068 debug_to_remove_vfork_catchpoint (int pid)
4069 {
4070 int retval;
4071
4072 retval = debug_target.to_remove_vfork_catchpoint (pid);
4073
4074 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4075 pid, retval);
4076
4077 return retval;
4078 }
4079
4080 static int
4081 debug_to_insert_exec_catchpoint (int pid)
4082 {
4083 int retval;
4084
4085 retval = debug_target.to_insert_exec_catchpoint (pid);
4086
4087 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4088 pid, retval);
4089
4090 return retval;
4091 }
4092
4093 static int
4094 debug_to_remove_exec_catchpoint (int pid)
4095 {
4096 int retval;
4097
4098 retval = debug_target.to_remove_exec_catchpoint (pid);
4099
4100 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4101 pid, retval);
4102
4103 return retval;
4104 }
4105
4106 static int
4107 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4108 {
4109 int has_exited;
4110
4111 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4112
4113 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4114 pid, wait_status, *exit_status, has_exited);
4115
4116 return has_exited;
4117 }
4118
4119 static int
4120 debug_to_can_run (void)
4121 {
4122 int retval;
4123
4124 retval = debug_target.to_can_run ();
4125
4126 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4127
4128 return retval;
4129 }
4130
4131 static struct gdbarch *
4132 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4133 {
4134 struct gdbarch *retval;
4135
4136 retval = debug_target.to_thread_architecture (ops, ptid);
4137
4138 fprintf_unfiltered (gdb_stdlog,
4139 "target_thread_architecture (%s) = %s [%s]\n",
4140 target_pid_to_str (ptid),
4141 host_address_to_string (retval),
4142 gdbarch_bfd_arch_info (retval)->printable_name);
4143 return retval;
4144 }
4145
/* Debug wrapper: forward to the saved to_stop method, then log the
   call to gdb_stdlog.  */
static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
4154
/* Debug wrapper: forward COMMAND to the saved to_rcmd method, then
   log the call to gdb_stdlog.  */
static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
4162
4163 static char *
4164 debug_to_pid_to_exec_file (int pid)
4165 {
4166 char *exec_file;
4167
4168 exec_file = debug_target.to_pid_to_exec_file (pid);
4169
4170 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4171 pid, exec_file);
4172
4173 return exec_file;
4174 }
4175
/* Install the debug_to_* logging wrappers: save a copy of the current
   target vector in debug_target, then overwrite the wrapped methods
   of current_target.  Each wrapper calls through debug_target and
   logs to gdb_stdlog.  */
static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4223 \f
4224
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4229
/* Implement the "monitor" command: pass CMD through to the current
   target's to_rcmd method.  Error out when to_rcmd is still the
   tcomplain placeholder — checking both current_target directly and,
   when the debug wrapper is installed, the saved debug_target
   underneath it.  */
static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
4242
4243 /* Print the name of each layers of our target stack. */
4244
4245 static void
4246 maintenance_print_target_stack (char *cmd, int from_tty)
4247 {
4248 struct target_ops *t;
4249
4250 printf_filtered (_("The current target stack is:\n"));
4251
4252 for (t = target_stack; t != NULL; t = t->beneath)
4253 {
4254 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4255 }
4256 }
4257
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The "set target-async" command writes to this variable.  If the
   inferior is executing, target_async_permitted is *not* updated (see
   set_maintenance_target_async_permitted).  */
static int target_async_permitted_1 = 0;
4264
/* The "set target-async" command callback.  Refuse to change the
   setting while there is a live inferior, reverting the user-visible
   staging variable to the active value before erroring out.  */
static void
set_maintenance_target_async_permitted (char *args, int from_tty,
					struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit so "show" reflects reality.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4277
/* The "show target-async" command callback.  */
static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4287
/* Temporary copies of permission settings.  The "set may-..."
   commands write these; set_target_permissions and
   set_write_memory_permission copy them into the real flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4296
/* Make the user-set values match the real values again.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4309
/* The one function handles (most of) the permission flags in the same
   way: refuse to change them while the inferior is executing,
   otherwise copy the user-set staging values into the real flags.
   may_write_memory is deliberately not handled here — it has its own
   setter, set_write_memory_permission, which works independently of
   observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staging copies so "show" reflects reality.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4331
/* Set memory write permission independently of observer mode.  Unlike
   set_target_permissions, this does not refuse to run while the
   inferior is executing.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4342
4343
/* Module initialization: install the initial (dummy) target and
   register all of target.c's commands and set/show variables.
   Called once during GDB startup.  */

void
initialize_targets (void)
{
  /* Seed the target stack with the dummy target so there is always at
     least one target present.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are two names for the same
     command.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* Whether GDB may control the inferior in asynchronous mode.  The
     user's change is staged in target_async_permitted_1 and applied
     by the set hook.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions. This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* User-settable permission flags.  Each setting is staged in the
     corresponding *_1 variable; the set hook (set_target_permissions,
     or set_write_memory_permission for may-write-memory) copies the
     staged value into the real flag.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  /* Create the cache used for target memory accesses (see
     dcache.h).  */
  target_dcache = dcache_init ();
}
This page took 0.155175 seconds and 4 git commands to generate.