minor cleanups to update_current_target
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47 #include "auxv.h"
48
49 static void target_info (char *, int);
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static void tcomplain (void) ATTRIBUTE_NORETURN;
77
78 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
79
80 static int return_zero (void);
81
82 void target_ignore (void);
83
84 static void target_command (char *, int);
85
86 static struct target_ops *find_default_run_target (char *);
87
88 static target_xfer_partial_ftype default_xfer_partial;
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static int dummy_find_memory_regions (struct target_ops *self,
94 find_memory_region_ftype ignore1,
95 void *ignore2);
96
97 static char *dummy_make_corefile_notes (struct target_ops *self,
98 bfd *ignore1, int *ignore2);
99
100 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
101
102 static int find_default_can_async_p (struct target_ops *ignore);
103
104 static int find_default_is_async_p (struct target_ops *ignore);
105
106 static enum exec_direction_kind default_execution_direction
107 (struct target_ops *self);
108
109 #include "target-delegates.c"
110
111 static void init_dummy_target (void);
112
113 static struct target_ops debug_target;
114
115 static void debug_to_open (char *, int);
116
117 static void debug_to_prepare_to_store (struct target_ops *self,
118 struct regcache *);
119
120 static void debug_to_files_info (struct target_ops *);
121
122 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
123 struct bp_target_info *);
124
125 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
126 struct bp_target_info *);
127
128 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
129 int, int, int);
130
131 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
132 struct gdbarch *,
133 struct bp_target_info *);
134
135 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
136 struct gdbarch *,
137 struct bp_target_info *);
138
139 static int debug_to_insert_watchpoint (struct target_ops *self,
140 CORE_ADDR, int, int,
141 struct expression *);
142
143 static int debug_to_remove_watchpoint (struct target_ops *self,
144 CORE_ADDR, int, int,
145 struct expression *);
146
147 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
148
149 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
150 CORE_ADDR, CORE_ADDR, int);
151
152 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
153 CORE_ADDR, int);
154
155 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
156 CORE_ADDR, int, int,
157 struct expression *);
158
159 static void debug_to_terminal_init (struct target_ops *self);
160
161 static void debug_to_terminal_inferior (struct target_ops *self);
162
163 static void debug_to_terminal_ours_for_output (struct target_ops *self);
164
165 static void debug_to_terminal_save_ours (struct target_ops *self);
166
167 static void debug_to_terminal_ours (struct target_ops *self);
168
169 static void debug_to_load (struct target_ops *self, char *, int);
170
171 static int debug_to_can_run (struct target_ops *self);
172
173 static void debug_to_stop (struct target_ops *self, ptid_t);
174
175 /* Pointer to array of target architecture structures; the size of the
176 array; the current index into the array; the allocated size of the
177 array. */
178 struct target_ops **target_structs;
179 unsigned target_struct_size;
180 unsigned target_struct_allocsize;
181 #define DEFAULT_ALLOCSIZE 10
182
183 /* The initial current target, so that there is always a semi-valid
184 current target. */
185
186 static struct target_ops dummy_target;
187
188 /* Top of target stack. */
189
190 static struct target_ops *target_stack;
191
192 /* The target structure we are currently using to talk to a process
193 or file or whatever "inferior" we have. */
194
195 struct target_ops current_target;
196
197 /* Command list for target. */
198
199 static struct cmd_list_element *targetlist = NULL;
200
201 /* Nonzero if we should trust readonly sections from the
202 executable when reading memory. */
203
204 static int trust_readonly = 0;
205
206 /* Nonzero if we should show true memory content including
207 memory breakpoint inserted by gdb. */
208
209 static int show_memory_breakpoints = 0;
210
211 /* These globals control whether GDB attempts to perform these
212 operations; they are useful for targets that need to prevent
213 inadvertant disruption, such as in non-stop mode. */
214
215 int may_write_registers = 1;
216
217 int may_write_memory = 1;
218
219 int may_insert_breakpoints = 1;
220
221 int may_insert_tracepoints = 1;
222
223 int may_insert_fast_tracepoints = 1;
224
225 int may_stop = 1;
226
227 /* Non-zero if we want to see trace of target level stuff. */
228
229 static unsigned int targetdebug = 0;
/* "show debug target" callback: report the current value of the
   "set debug target" knob.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
236
237 static void setup_target_debug (void);
238
239 /* The user just typed 'target' without the name of a target. */
240
241 static void
242 target_command (char *arg, int from_tty)
243 {
244 fputs_filtered ("Argument required (target name). Try `help target'\n",
245 gdb_stdout);
246 }
247
248 /* Default target_has_* methods for process_stratum targets. */
249
250 int
251 default_child_has_all_memory (struct target_ops *ops)
252 {
253 /* If no inferior selected, then we can't read memory here. */
254 if (ptid_equal (inferior_ptid, null_ptid))
255 return 0;
256
257 return 1;
258 }
259
260 int
261 default_child_has_memory (struct target_ops *ops)
262 {
263 /* If no inferior selected, then we can't read memory here. */
264 if (ptid_equal (inferior_ptid, null_ptid))
265 return 0;
266
267 return 1;
268 }
269
270 int
271 default_child_has_stack (struct target_ops *ops)
272 {
273 /* If no inferior selected, there's no stack. */
274 if (ptid_equal (inferior_ptid, null_ptid))
275 return 0;
276
277 return 1;
278 }
279
280 int
281 default_child_has_registers (struct target_ops *ops)
282 {
283 /* Can't read registers from no inferior. */
284 if (ptid_equal (inferior_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290 int
291 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
292 {
293 /* If there's no thread selected, then we can't make it run through
294 hoops. */
295 if (ptid_equal (the_ptid, null_ptid))
296 return 0;
297
298 return 1;
299 }
300
301
302 int
303 target_has_all_memory_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_all_memory (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_memory_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_memory (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_stack_1 (void)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_stack (t))
333 return 1;
334
335 return 0;
336 }
337
338 int
339 target_has_registers_1 (void)
340 {
341 struct target_ops *t;
342
343 for (t = current_target.beneath; t != NULL; t = t->beneath)
344 if (t->to_has_registers (t))
345 return 1;
346
347 return 0;
348 }
349
350 int
351 target_has_execution_1 (ptid_t the_ptid)
352 {
353 struct target_ops *t;
354
355 for (t = current_target.beneath; t != NULL; t = t->beneath)
356 if (t->to_has_execution (t, the_ptid))
357 return 1;
358
359 return 0;
360 }
361
362 int
363 target_has_execution_current (void)
364 {
365 return target_has_execution_1 (inferior_ptid);
366 }
367
/* Complete initialization of T.  This ensures that various fields in
   T are set, if needed by the target implementation.  */

void
complete_target_initialization (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  /* NOTE(review): these casts install return_zero through function
     pointer types with different parameter lists; callers always
     invoke through the cast-to type.  Kept as-is — matches the
     file's long-standing convention.  */
  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Fill every still-NULL method slot with its generated delegator
     (see target-delegates.c).  */
  install_delegators (t);
}
395
/* Add possible target architecture T to the list and add a new
   command 'target T->to_shortname'.  Set COMPLETER as the command's
   completer if not NULL.  */

void
add_target_with_completer (struct target_ops *t,
			   completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Make sure T has all "must have" methods and delegators filled in
     before it becomes reachable.  */
  complete_target_initialization (t);

  /* Lazily allocate the registry on first use; double its size when
     full.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registered target also creates the "target" prefix
     command itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
	       &targetlist);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
436
437 /* Add a possible target architecture to the list. */
438
439 void
440 add_target (struct target_ops *t)
441 {
442 add_target_with_completer (t, NULL);
443 }
444
445 /* See target.h. */
446
447 void
448 add_deprecated_target_alias (struct target_ops *t, char *alias)
449 {
450 struct cmd_list_element *c;
451 char *alt;
452
453 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
454 see PR cli/15104. */
455 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
456 alt = xstrprintf ("target %s", t->to_shortname);
457 deprecate_cmd (c, alt);
458 }
459
/* Stub functions */

/* Deliberate no-op, used as a default implementation for target
   methods that require no action.  */

void
target_ignore (void)
{
}
466
467 void
468 target_kill (void)
469 {
470 if (targetdebug)
471 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
472
473 current_target.to_kill (&current_target);
474 }
475
476 void
477 target_load (char *arg, int from_tty)
478 {
479 target_dcache_invalidate ();
480 (*current_target.to_load) (&current_target, arg, from_tty);
481 }
482
483 void
484 target_create_inferior (char *exec_file, char *args,
485 char **env, int from_tty)
486 {
487 struct target_ops *t;
488
489 for (t = current_target.beneath; t != NULL; t = t->beneath)
490 {
491 if (t->to_create_inferior != NULL)
492 {
493 t->to_create_inferior (t, exec_file, args, env, from_tty);
494 if (targetdebug)
495 fprintf_unfiltered (gdb_stdlog,
496 "target_create_inferior (%s, %s, xxx, %d)\n",
497 exec_file, args, from_tty);
498 return;
499 }
500 }
501
502 internal_error (__FILE__, __LINE__,
503 _("could not find a target to create inferior"));
504 }
505
/* Give the inferior ownership of the terminal, unless GDB is going to
   keep running in the foreground (background resume).  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) (&current_target);
}
520
521 static int
522 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
523 struct target_ops *t)
524 {
525 errno = EIO; /* Can't read/write this location. */
526 return 0; /* No bytes handled. */
527 }
528
529 static void
530 tcomplain (void)
531 {
532 error (_("You can't do that when your target is `%s'"),
533 current_target.to_shortname);
534 }
535
/* Complain that the requested operation needs a live process.  Does
   not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
541
/* Default to_terminal_info implementation: there is nothing saved to
   report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
547
548 /* A default implementation for the to_get_ada_task_ptid target method.
549
550 This function builds the PTID by using both LWP and TID as part of
551 the PTID lwp and tid elements. The pid used is the pid of the
552 inferior_ptid. */
553
554 static ptid_t
555 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
556 {
557 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
558 }
559
560 static enum exec_direction_kind
561 default_execution_direction (struct target_ops *self)
562 {
563 if (!target_can_execute_reverse)
564 return EXEC_FORWARD;
565 else if (!target_can_async_p ())
566 return EXEC_FORWARD;
567 else
568 gdb_assert_not_reached ("\
569 to_execution_direction must be implemented for reverse async");
570 }
571
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Install the delegators.  */
  install_delegators (&current_target);

  /* The squashed vector takes its stratum from the topmost pushed
     target.  */
  current_target.to_stratum = target_stack->to_stratum;

#define INHERIT(FIELD, TARGET) \
      if (!current_target.FIELD) \
	current_target.FIELD = (TARGET)->FIELD

  /* Do not add any new INHERITs here.  Instead, use the delegation
     mechanism provided by make-target-delegates.  */
  for (t = target_stack; t; t = t->beneath)
    {
      /* Top-to-bottom: the first (topmost) target to provide a field
	 wins, because INHERIT only fills still-zero slots.  */
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      INHERIT (to_attach_no_wait, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_has_thread_control, t);
      INHERIT (to_magic, t);
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Do not add any new de_faults here.  Instead, use the
     delegation mechanism provided by make-target-delegates.  */

#define de_fault(field, value) \
    if (!current_target.field) \
      current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);

#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
646
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the stack is
     kept sorted with higher strata on top, so stop at the first entry
     whose stratum is not above T's.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      /* Unlink before closing so the closing target is no longer
	 reachable from the stack.  */
      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  /* Rebuild the squashed current_target from the new stack.  */
  update_current_target ();
}
697
/* Remove a target_ops vector from the stack, wherever it may be.
   Return how many times it was removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  /* The dummy target is the permanent bottom of the stack; it may
     never be removed.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  /* If we don't find target_ops, quit.  Only open targets should be
     closed.  */
  if ((*cur) == NULL)
    return 0;

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return 1;
}
739
/* Pop every target whose stratum is strictly above ABOVE_STRATUM.
   It is an internal error if the topmost target refuses to unpush.  */

void
pop_all_targets_above (enum strata above_stratum)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  /* Defensive: stop looping even if internal_error returns.  */
	  break;
	}
    }
}
756
757 void
758 pop_all_targets (void)
759 {
760 pop_all_targets_above (dummy_stratum);
761 }
762
763 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
764
765 int
766 target_is_pushed (struct target_ops *t)
767 {
768 struct target_ops **cur;
769
770 /* Check magic number. If wrong, it probably means someone changed
771 the struct definition, but not all the places that initialize one. */
772 if (t->to_magic != OPS_MAGIC)
773 {
774 fprintf_unfiltered (gdb_stderr,
775 "Magic number of %s target struct wrong\n",
776 t->to_shortname);
777 internal_error (__FILE__, __LINE__,
778 _("failed internal consistency check"));
779 }
780
781 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
782 if (*cur == t)
783 return 1;
784
785 return 0;
786 }
787
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* Volatile because it is assigned inside TRY_CATCH and read after.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the topmost target that implements
     to_get_thread_local_address.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; re-throw for a higher
		 catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
884
885 const char *
886 target_xfer_status_to_string (enum target_xfer_status err)
887 {
888 #define CASE(X) case X: return #X
889 switch (err)
890 {
891 CASE(TARGET_XFER_E_IO);
892 CASE(TARGET_XFER_E_UNAVAILABLE);
893 default:
894 return "<unknown>";
895 }
896 #undef CASE
897 };
898
899
900 #undef MIN
901 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
902
/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Read in 4-byte aligned chunks; TLEN is how many of the four
	 bytes belong to the string, OFFSET where they start.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer geometrically as needed.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out, stopping (and counting the terminator) at the
	 first NUL.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
done:
  /* Ownership of BUFFER transfers to the caller even on error.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
975
976 struct target_section_table *
977 target_get_section_table (struct target_ops *target)
978 {
979 if (targetdebug)
980 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
981
982 return (*target->to_get_section_table) (target);
983 }
984
985 /* Find a section containing ADDR. */
986
987 struct target_section *
988 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
989 {
990 struct target_section_table *table = target_get_section_table (target);
991 struct target_section *secp;
992
993 if (table == NULL)
994 return NULL;
995
996 for (secp = table->sections; secp < table->sections_end; secp++)
997 {
998 if (addr >= secp->addr && addr < secp->endaddr)
999 return secp;
1000 }
1001 return NULL;
1002 }
1003
/* Read memory from the live target, even if currently inspecting a
   traceframe.  The return is the same as that of target_read.  */

static enum target_xfer_status
target_read_live_memory (enum target_object object,
			 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status ret;
  struct cleanup *cleanup;

  /* Switch momentarily out of tfind mode so to access live memory.
     Note that this must not clear global state, such as the frame
     cache, which must still remain valid for the previous traceframe.
     We may be _building_ the frame cache at this point.  */
  cleanup = make_cleanup_restore_traceframe_number ();
  set_traceframe_number (-1);

  /* Start below current_target so the transfer hits the real target
     stack.  */
  ret = target_xfer_partial (current_target.beneath, object, NULL,
			     myaddr, NULL, memaddr, len, xfered_len);

  /* Restore the previously selected traceframe.  */
  do_cleanups (cleanup);
  return ret;
}
1028
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static enum target_xfer_status
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   ULONGEST len, ULONGEST *xfered_len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed if MEMADDR falls in a section flagged SEC_READONLY.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->the_bfd_section->owner,
				 secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len, xfered_len);
		}
	    }
	}
    }

  /* Not a readonly section, or no covering section found.  */
  return TARGET_XFER_EOF;
}
1084
/* Read memory from more than one valid target.  A core file, for
   instance, could have some of memory but delegate other bits to
   the target below it.  So, we must manually try all targets.  */

static enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
			 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, len,
				  xfered_len);
      /* Success: no need to look further down the stack.  */
      if (res == TARGET_XFER_OK)
	break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_E_UNAVAILABLE)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* If the loop fell off the bottom, RES holds the last failure.  */
  return res;
}
1119
1120 /* Perform a partial memory transfer.
1121 For docs see target.h, to_xfer_partial. */
1122
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  /* Length of the transfer clamped to the end of the containing
     memory region.  NOTE(review): this is an int while LEN is
     ULONGEST — a very large request would truncate; confirm callers
     cap LEN appropriately.  */
  int reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate the load address into the overlay's mapped
	     address before searching the section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  /* The traceframe does not cover the start of the request:
	     either nothing is available, or the first available range
	     starts beyond MEMADDR.  */
	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  /* Shrink LEN so the read stops where the collected
		     data begins.  */
		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr,
						       len, xfered_len);
	      if (res == TARGET_XFER_OK)
		return TARGET_XFER_OK;
	      else
		{
		  /* No use trying further, we know some memory starting
		     at MEMADDR isn't available.  XFERED_LEN reports how
		     many bytes starting at MEMADDR are unavailable.  */
		  *xfered_len = len;
		  return TARGET_XFER_E_UNAVAILABLE;
		}
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching the target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return TARGET_XFER_E_IO;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return TARGET_XFER_E_IO;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  /* Use the dcache only when a live inferior exists and the access is
     cacheable (region-tagged, or a stack/code access with the
     corresponding cache enabled).  */
  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();
      int l;

      if (readbuf != NULL)
	l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
				reg_len, 1);
      if (l <= 0)
	return TARGET_XFER_E_IO;
      else
	{
	  *xfered_len = (ULONGEST) l;
	  return TARGET_XFER_OK;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res == TARGET_XFER_OK
      && inf != NULL
      && writebuf != NULL
      && target_dcache_init_p ()
      && !region->attrib.cache
      && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get ();

      dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1330
1331 /* Perform a partial memory transfer. For docs see target.h,
1332 to_xfer_partial. */
1333
1334 static enum target_xfer_status
1335 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1336 gdb_byte *readbuf, const gdb_byte *writebuf,
1337 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1338 {
1339 enum target_xfer_status res;
1340
1341 /* Zero length requests are ok and require no work. */
1342 if (len == 0)
1343 return TARGET_XFER_EOF;
1344
1345 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1346 breakpoint insns, thus hiding out from higher layers whether
1347 there are software breakpoints inserted in the code stream. */
1348 if (readbuf != NULL)
1349 {
1350 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1351 xfered_len);
1352
1353 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1354 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1355 }
1356 else
1357 {
1358 void *buf;
1359 struct cleanup *old_chain;
1360
1361 /* A large write request is likely to be partially satisfied
1362 by memory_xfer_partial_1. We will continually malloc
1363 and free a copy of the entire write request for breakpoint
1364 shadow handling even though we only end up writing a small
1365 subset of it. Cap writes to 4KB to mitigate this. */
1366 len = min (4096, len);
1367
1368 buf = xmalloc (len);
1369 old_chain = make_cleanup (xfree, buf);
1370 memcpy (buf, writebuf, len);
1371
1372 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1373 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1374 xfered_len);
1375
1376 do_cleanups (old_chain);
1377 }
1378
1379 return res;
1380 }
1381
1382 static void
1383 restore_show_memory_breakpoints (void *arg)
1384 {
1385 show_memory_breakpoints = (uintptr_t) arg;
1386 }
1387
1388 struct cleanup *
1389 make_show_memory_breakpoints_cleanup (int show)
1390 {
1391 int current = show_memory_breakpoints;
1392
1393 show_memory_breakpoints = show;
1394 return make_cleanup (restore_show_memory_breakpoints,
1395 (void *) (uintptr_t) current);
1396 }
1397
1398 /* For docs see target.h, to_xfer_partial. */
1399
enum target_xfer_status
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  enum target_xfer_status retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  /* Transfer is done when LEN is zero.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Honor the "may-write-memory" setting before touching the target.  */
  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  *xfered_len = 0;

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
      || object == TARGET_OBJECT_CODE_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len, xfered_len);
  else if (object == TARGET_OBJECT_RAW_MEMORY)
    {
      /* Request the normal memory object from other layers.  This
	 bypasses the dcache/breakpoint-shadow handling above.  */
      retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
					xfered_len);
    }
  else
    /* Any other object kind goes straight to the target's own method.  */
    retval = ops->to_xfer_partial (ops, object, annex, readbuf,
				   writebuf, offset, len, xfered_len);

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %d, %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  pulongest (len), retval,
			  pulongest (*xfered_len));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval == TARGET_XFER_OK && myaddr != NULL)
	{
	  /* Hex-dump the transferred bytes, one 16-byte-aligned row
	     per line; with targetdebug < 2 only the first row is
	     printed.  NOTE(review): I is an int compared against the
	     ULONGEST *XFERED_LEN — harmless for realistic sizes, but
	     technically a signed/unsigned mismatch.  */
	  int i;

	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < *xfered_len; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }

  /* Check implementations of to_xfer_partial update *XFERED_LEN
     properly.  Do assertion after printing debug messages, so that we
     can find more clues on assertion failure from debugging messages.  */
  if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
    gdb_assert (*xfered_len > 0);

  return retval;
}
1490
1491 /* Read LEN bytes of target memory at address MEMADDR, placing the
1492 results in GDB's memory at MYADDR. Returns either 0 for success or
1493 TARGET_XFER_E_IO if any error occurs.
1494
1495 If an error occurs, no guarantee is made about the contents of the data at
1496 MYADDR. In particular, the caller should not depend upon partial reads
1497 filling the buffer with good data. There is no way for the caller to know
1498 how much good data might have been transfered anyway. Callers that can
1499 deal with partial reads should call target_read (which will retry until
1500 it makes no progress, and then return how much was transferred). */
1501
1502 int
1503 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1504 {
1505 /* Dispatch to the topmost target, not the flattened current_target.
1506 Memory accesses check target->to_has_(all_)memory, and the
1507 flattened target doesn't inherit those. */
1508 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1509 myaddr, memaddr, len) == len)
1510 return 0;
1511 else
1512 return TARGET_XFER_E_IO;
1513 }
1514
1515 /* Like target_read_memory, but specify explicitly that this is a read
1516 from the target's raw memory. That is, this read bypasses the
1517 dcache, breakpoint shadowing, etc. */
1518
1519 int
1520 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1521 {
1522 /* See comment in target_read_memory about why the request starts at
1523 current_target.beneath. */
1524 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1525 myaddr, memaddr, len) == len)
1526 return 0;
1527 else
1528 return TARGET_XFER_E_IO;
1529 }
1530
1531 /* Like target_read_memory, but specify explicitly that this is a read from
1532 the target's stack. This may trigger different cache behavior. */
1533
1534 int
1535 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1536 {
1537 /* See comment in target_read_memory about why the request starts at
1538 current_target.beneath. */
1539 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1540 myaddr, memaddr, len) == len)
1541 return 0;
1542 else
1543 return TARGET_XFER_E_IO;
1544 }
1545
1546 /* Like target_read_memory, but specify explicitly that this is a read from
1547 the target's code. This may trigger different cache behavior. */
1548
1549 int
1550 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1551 {
1552 /* See comment in target_read_memory about why the request starts at
1553 current_target.beneath. */
1554 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1555 myaddr, memaddr, len) == len)
1556 return 0;
1557 else
1558 return TARGET_XFER_E_IO;
1559 }
1560
1561 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1562 Returns either 0 for success or TARGET_XFER_E_IO if any
1563 error occurs. If an error occurs, no guarantee is made about how
1564 much data got written. Callers that can deal with partial writes
1565 should call target_write. */
1566
1567 int
1568 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1569 {
1570 /* See comment in target_read_memory about why the request starts at
1571 current_target.beneath. */
1572 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1573 myaddr, memaddr, len) == len)
1574 return 0;
1575 else
1576 return TARGET_XFER_E_IO;
1577 }
1578
1579 /* Write LEN bytes from MYADDR to target raw memory at address
1580 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1581 if any error occurs. If an error occurs, no guarantee is made
1582 about how much data got written. Callers that can deal with
1583 partial writes should call target_write. */
1584
1585 int
1586 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1587 {
1588 /* See comment in target_read_memory about why the request starts at
1589 current_target.beneath. */
1590 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1591 myaddr, memaddr, len) == len)
1592 return 0;
1593 else
1594 return TARGET_XFER_E_IO;
1595 }
1596
1597 /* Fetch the target's memory map. */
1598
1599 VEC(mem_region_s) *
1600 target_memory_map (void)
1601 {
1602 VEC(mem_region_s) *result;
1603 struct mem_region *last_one, *this_one;
1604 int ix;
1605 struct target_ops *t;
1606
1607 if (targetdebug)
1608 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1609
1610 result = current_target.to_memory_map (&current_target);
1611 if (result == NULL)
1612 return NULL;
1613
1614 qsort (VEC_address (mem_region_s, result),
1615 VEC_length (mem_region_s, result),
1616 sizeof (struct mem_region), mem_region_cmp);
1617
1618 /* Check that regions do not overlap. Simultaneously assign
1619 a numbering for the "mem" commands to use to refer to
1620 each region. */
1621 last_one = NULL;
1622 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1623 {
1624 this_one->number = ix;
1625
1626 if (last_one && last_one->hi > this_one->lo)
1627 {
1628 warning (_("Overlapping regions in memory map: ignoring"));
1629 VEC_free (mem_region_s, result);
1630 return NULL;
1631 }
1632 last_one = this_one;
1633 }
1634
1635 return result;
1636 }
1637
1638 void
1639 target_flash_erase (ULONGEST address, LONGEST length)
1640 {
1641 if (targetdebug)
1642 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1643 hex_string (address), phex (length, 0));
1644 current_target.to_flash_erase (&current_target, address, length);
1645 }
1646
1647 void
1648 target_flash_done (void)
1649 {
1650 if (targetdebug)
1651 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1652 current_target.to_flash_done (&current_target);
1653 }
1654
/* "show" callback for the "trust-readonly-sections" setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1663
1664 /* More generic transfers. */
1665
1666 static enum target_xfer_status
1667 default_xfer_partial (struct target_ops *ops, enum target_object object,
1668 const char *annex, gdb_byte *readbuf,
1669 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
1670 ULONGEST *xfered_len)
1671 {
1672 if (object == TARGET_OBJECT_MEMORY
1673 && ops->deprecated_xfer_memory != NULL)
1674 /* If available, fall back to the target's
1675 "deprecated_xfer_memory" method. */
1676 {
1677 int xfered = -1;
1678
1679 errno = 0;
1680 if (writebuf != NULL)
1681 {
1682 void *buffer = xmalloc (len);
1683 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1684
1685 memcpy (buffer, writebuf, len);
1686 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1687 1/*write*/, NULL, ops);
1688 do_cleanups (cleanup);
1689 }
1690 if (readbuf != NULL)
1691 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1692 0/*read*/, NULL, ops);
1693 if (xfered > 0)
1694 {
1695 *xfered_len = (ULONGEST) xfered;
1696 return TARGET_XFER_E_IO;
1697 }
1698 else if (xfered == 0 && errno == 0)
1699 /* "deprecated_xfer_memory" uses 0, cross checked against
1700 ERRNO as one indication of an error. */
1701 return TARGET_XFER_EOF;
1702 else
1703 return TARGET_XFER_E_IO;
1704 }
1705 else
1706 {
1707 gdb_assert (ops->beneath != NULL);
1708 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1709 readbuf, writebuf, offset, len,
1710 xfered_len);
1711 }
1712 }
1713
1714 /* Target vector read/write partial wrapper functions. */
1715
1716 static enum target_xfer_status
1717 target_read_partial (struct target_ops *ops,
1718 enum target_object object,
1719 const char *annex, gdb_byte *buf,
1720 ULONGEST offset, ULONGEST len,
1721 ULONGEST *xfered_len)
1722 {
1723 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1724 xfered_len);
1725 }
1726
1727 static enum target_xfer_status
1728 target_write_partial (struct target_ops *ops,
1729 enum target_object object,
1730 const char *annex, const gdb_byte *buf,
1731 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1732 {
1733 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1734 xfered_len);
1735 }
1736
1737 /* Wrappers to perform the full transfer. */
1738
1739 /* For docs on target_read see target.h. */
1740
1741 LONGEST
1742 target_read (struct target_ops *ops,
1743 enum target_object object,
1744 const char *annex, gdb_byte *buf,
1745 ULONGEST offset, LONGEST len)
1746 {
1747 LONGEST xfered = 0;
1748
1749 while (xfered < len)
1750 {
1751 ULONGEST xfered_len;
1752 enum target_xfer_status status;
1753
1754 status = target_read_partial (ops, object, annex,
1755 (gdb_byte *) buf + xfered,
1756 offset + xfered, len - xfered,
1757 &xfered_len);
1758
1759 /* Call an observer, notifying them of the xfer progress? */
1760 if (status == TARGET_XFER_EOF)
1761 return xfered;
1762 else if (status == TARGET_XFER_OK)
1763 {
1764 xfered += xfered_len;
1765 QUIT;
1766 }
1767 else
1768 return -1;
1769
1770 }
1771 return len;
1772 }
1773
1774 /* Assuming that the entire [begin, end) range of memory cannot be
1775 read, try to read whatever subrange is possible to read.
1776
1777 The function returns, in RESULT, either zero or one memory block.
1778 If there's a readable subrange at the beginning, it is completely
1779 read and returned. Any further readable subrange will not be read.
1780 Otherwise, if there's a readable subrange at the end, it will be
1781 completely read and returned. Any readable subranges before it
1782 (obviously, not starting at the beginning), will be ignored. In
1783 other cases -- either no readable subrange, or readable subrange(s)
1784 that is neither at the beginning, or end, nothing is returned.
1785
1786 The purpose of this function is to handle a read across a boundary
1787 of accessible memory in a case when memory map is not available.
1788 The above restrictions are fine for this case, but will give
1789 incorrect results if the memory is 'patchy'. However, supporting
1790 'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
1792 recommended for this case -- and target_read_memory_robust will
1793 take care of reading multiple ranges then. */
1794
static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;
  ULONGEST xfered_len;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
    {
      /* First byte readable: search forward for the readable prefix.  */
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1,
				&xfered_len) == TARGET_XFER_OK)
    {
      /* Last byte readable: search backward for the readable suffix.  */
      forward = 0;
      --current_end;
    }
  else
    {
      /* Neither endpoint is readable; give up.  */
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the data already read;
	 when searching backward that is the upper half.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  BUF itself
	 becomes the result block (ownership transfers to *RESULT).  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy just that
	 tail out of BUF so the result block is tightly sized.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
1908
1909 void
1910 free_memory_read_result_vector (void *x)
1911 {
1912 VEC(memory_read_result_s) *v = x;
1913 memory_read_result_s *current;
1914 int ix;
1915
1916 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1917 {
1918 xfree (current->data);
1919 }
1920 VEC_free (memory_read_result_s, v);
1921 }
1922
/* Read LEN bytes at OFFSET, collecting every readable subrange into a
   vector of memory_read_result blocks; unreadable regions are simply
   skipped.  Caller owns the returned vector and its data buffers
   (see free_memory_read_result_vector).  */

VEC(memory_read_result_s) *
read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  VEC(memory_read_result_s) *result = 0;

  LONGEST xfered = 0;
  while (xfered < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered);
      LONGEST rlen;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      /* region->hi == 0 means the region has no upper bound.  */
      if (region->hi == 0)
	rlen = len - xfered;
      else
	rlen = region->hi - offset;
	/* NOTE(review): this computes distance from OFFSET rather than
	   OFFSET + XFERED, so RLEN over-estimates once XFERED > 0;
	   the min() below masks this for the read path — confirm.  */

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered += rlen;
	}
      else
	{
	  LONGEST to_read = min (len - xfered, rlen);
	  gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);

	  LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
				      (gdb_byte *) buffer,
				      offset + xfered, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfer <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      xfree (buffer);
	      read_whatever_is_readable (ops, offset + xfered,
					 offset + xfered + to_read, &result);
	      xfered += to_read;
	    }
	  else
	    {
	      /* Full or partial success: BUFFER's ownership moves into
		 the result vector.  */
	      struct memory_read_result r;
	      r.data = buffer;
	      r.begin = offset + xfered;
	      r.end = r.begin + xfer;
	      VEC_safe_push (memory_read_result_s, result, &r);
	      xfered += xfer;
	    }
	  QUIT;
	}
    }
  return result;
}
1981
1982
1983 /* An alternative to target_write with progress callbacks. */
1984
1985 LONGEST
1986 target_write_with_progress (struct target_ops *ops,
1987 enum target_object object,
1988 const char *annex, const gdb_byte *buf,
1989 ULONGEST offset, LONGEST len,
1990 void (*progress) (ULONGEST, void *), void *baton)
1991 {
1992 LONGEST xfered = 0;
1993
1994 /* Give the progress callback a chance to set up. */
1995 if (progress)
1996 (*progress) (0, baton);
1997
1998 while (xfered < len)
1999 {
2000 ULONGEST xfered_len;
2001 enum target_xfer_status status;
2002
2003 status = target_write_partial (ops, object, annex,
2004 (gdb_byte *) buf + xfered,
2005 offset + xfered, len - xfered,
2006 &xfered_len);
2007
2008 if (status == TARGET_XFER_EOF)
2009 return xfered;
2010 if (TARGET_XFER_STATUS_ERROR_P (status))
2011 return -1;
2012
2013 gdb_assert (status == TARGET_XFER_OK);
2014 if (progress)
2015 (*progress) (xfered_len, baton);
2016
2017 xfered += xfered_len;
2018 QUIT;
2019 }
2020 return len;
2021 }
2022
2023 /* For docs on target_write see target.h. */
2024
LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same as target_write_with_progress, with no progress callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2034
2035 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2036 the size of the transferred data. PADDING additional bytes are
2037 available in *BUF_P. This is a helper function for
2038 target_read_alloc; see the declaration of that function for more
2039 information. */
2040
static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      ULONGEST xfered_len;
      enum target_xfer_status status;

      /* PADDING bytes are held back from each request so the caller
	 has room after the data (e.g. for a NUL terminator in
	 target_read_stralloc).  */
      status = target_read_partial (ops, object, annex, &buf[buf_pos],
				    buf_pos, buf_alloc - buf_pos - padding,
				    &xfered_len);

      if (status == TARGET_XFER_EOF)
	{
	  /* Read all there was.  Note that *BUF_P is left unset when
	     nothing was read; callers must check the return value
	     before using it.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}
      else if (status != TARGET_XFER_OK)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return TARGET_XFER_E_IO;
	}

      buf_pos += xfered_len;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2097
2098 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2099 the size of the transferred data. See the declaration in "target.h"
2100 function for more information about the return value. */
2101
LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding needed for a plain binary read.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2108
2109 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2110 returned as a string, allocated using xmalloc. If an error occurs
2111 or the transfer is unsupported, NULL is returned. Empty objects
2112 are returned as allocated but empty strings. A warning is issued
2113 if the result contains any embedded NUL bytes. */
2114
char *
target_read_stralloc (struct target_ops *ops, enum target_object object,
		      const char *annex)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Padding of 1 reserves room for the NUL terminator written below.  */
  transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
  /* NOTE(review): when TRANSFERRED <= 0, target_read_alloc_1 leaves
     BUFFER unset; both such cases return before BUFSTR is used, but
     the assignment itself copies an indeterminate value.  */
  bufstr = (char *) buffer;

  if (transferred < 0)
    return NULL;

  if (transferred == 0)
    return xstrdup ("");

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target object %d, annex %s, "
		   "contained unexpected null characters"),
		 (int) object, annex ? annex : "(none)");
	break;
      }

  return bufstr;
}
2146
/* Memory transfer methods.  */

/* Read LEN bytes at ADDR from the (possibly non-current) target OPS
   into BUF; throws a memory error on a short or failed read.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}
2161
/* Read an unsigned LEN-byte integer in BYTE_ORDER from ADDR using
   OPS, bypassing caches and overlays (see get_target_memory).  LEN
   must not exceed sizeof (ULONGEST), which is asserted.  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
2172
/* See target.h.  */

int
target_insert_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* Honor the "may-insert-breakpoints" setting; non-zero tells the
     caller the insertion failed.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not insert breakpoints"));
      return 1;
    }

  return current_target.to_insert_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2188
/* See target.h.  */

int
target_remove_breakpoint (struct gdbarch *gdbarch,
			  struct bp_target_info *bp_tgt)
{
  /* This is kind of a weird case to handle, but the permission might
     have been changed after breakpoints were inserted - in which case
     we should just take the user literally and assume that any
     breakpoints should be left in place.  */
  if (!may_insert_breakpoints)
    {
      warning (_("May not remove breakpoints"));
      return 1;
    }

  return current_target.to_remove_breakpoint (&current_target,
					      gdbarch, bp_tgt);
}
2208
2209 static void
2210 target_info (char *args, int from_tty)
2211 {
2212 struct target_ops *t;
2213 int has_all_mem = 0;
2214
2215 if (symfile_objfile != NULL)
2216 printf_unfiltered (_("Symbols from \"%s\".\n"),
2217 objfile_name (symfile_objfile));
2218
2219 for (t = target_stack; t != NULL; t = t->beneath)
2220 {
2221 if (!(*t->to_has_memory) (t))
2222 continue;
2223
2224 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2225 continue;
2226 if (has_all_mem)
2227 printf_unfiltered (_("\tWhile running this, "
2228 "GDB does not access memory from...\n"));
2229 printf_unfiltered ("%s:\n", t->to_longname);
2230 (t->to_files_info) (t);
2231 has_all_mem = (*t->to_has_all_memory) (t);
2232 }
2233 }
2234
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case none of this state
     can be discarded safely.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* Force re-probing of in-process agent support on the next use.  */
  agent_capability_invalidate ();
}
2275
2276 /* Callback for iterate_over_inferiors. Gets rid of the given
2277 inferior. */
2278
2279 static int
2280 dispose_inferior (struct inferior *inf, void *args)
2281 {
2282 struct thread_info *thread;
2283
2284 thread = any_thread_of_process (inf->pid);
2285 if (thread)
2286 {
2287 switch_to_thread (thread->ptid);
2288
2289 /* Core inferiors actually should be detached, not killed. */
2290 if (target_has_execution)
2291 target_kill ();
2292 else
2293 target_detach (NULL, 0);
2294 }
2295
2296 return 0;
2297 }
2298
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* If anything is being debugged, confirm with the user (when
     interactive and live) before disposing of all inferiors.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
2325
2326 /* Detach a target after doing deferred register stores. */
2327
2328 void
2329 target_detach (const char *args, int from_tty)
2330 {
2331 struct target_ops* t;
2332
2333 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2334 /* Don't remove global breakpoints here. They're removed on
2335 disconnection from the target. */
2336 ;
2337 else
2338 /* If we're in breakpoints-always-inserted mode, have to remove
2339 them before detaching. */
2340 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2341
2342 prepare_for_detach ();
2343
2344 current_target.to_detach (&current_target, args, from_tty);
2345 if (targetdebug)
2346 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2347 args, from_tty);
2348 }
2349
/* Disconnect from the current target without resuming it, passing
   ARGS and FROM_TTY through to the target's to_disconnect method.  */

void
target_disconnect (char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
			args, from_tty);
  current_target.to_disconnect (&current_target, args, from_tty);
}
2363
2364 ptid_t
2365 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2366 {
2367 struct target_ops *t;
2368 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2369 status, options);
2370
2371 if (targetdebug)
2372 {
2373 char *status_string;
2374 char *options_string;
2375
2376 status_string = target_waitstatus_to_string (status);
2377 options_string = target_options_to_string (options);
2378 fprintf_unfiltered (gdb_stdlog,
2379 "target_wait (%d, status, options={%s})"
2380 " = %d, %s\n",
2381 ptid_get_pid (ptid), options_string,
2382 ptid_get_pid (retval), status_string);
2383 xfree (status_string);
2384 xfree (options_string);
2385 }
2386
2387 return retval;
2388 }
2389
/* Return a printable representation of PTID, delegating to the
   current target.  NOTE(review): result may point to static storage;
   callers should not assume it survives a second call.  */

char *
target_pid_to_str (ptid_t ptid)
{
  return (*current_target.to_pid_to_str) (&current_target, ptid);
}
2395
/* Return the target-reported name of thread INFO, delegating to the
   current target's to_thread_name method.  */

char *
target_thread_name (struct thread_info *info)
{
  return current_target.to_thread_name (&current_target, info);
}
2401
2402 void
2403 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2404 {
2405 struct target_ops *t;
2406
2407 target_dcache_invalidate ();
2408
2409 current_target.to_resume (&current_target, ptid, step, signal);
2410 if (targetdebug)
2411 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2412 ptid_get_pid (ptid),
2413 step ? "step" : "continue",
2414 gdb_signal_to_name (signal));
2415
2416 registers_changed_ptid (ptid);
2417 set_executing (ptid, 1);
2418 set_running (ptid, 1);
2419 clear_inline_frame_state (ptid);
2420 }
2421
2422 void
2423 target_pass_signals (int numsigs, unsigned char *pass_signals)
2424 {
2425 if (targetdebug)
2426 {
2427 int i;
2428
2429 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2430 numsigs);
2431
2432 for (i = 0; i < numsigs; i++)
2433 if (pass_signals[i])
2434 fprintf_unfiltered (gdb_stdlog, " %s",
2435 gdb_signal_to_name (i));
2436
2437 fprintf_unfiltered (gdb_stdlog, " })\n");
2438 }
2439
2440 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2441 }
2442
2443 void
2444 target_program_signals (int numsigs, unsigned char *program_signals)
2445 {
2446 if (targetdebug)
2447 {
2448 int i;
2449
2450 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2451 numsigs);
2452
2453 for (i = 0; i < numsigs; i++)
2454 if (program_signals[i])
2455 fprintf_unfiltered (gdb_stdlog, " %s",
2456 gdb_signal_to_name (i));
2457
2458 fprintf_unfiltered (gdb_stdlog, " })\n");
2459 }
2460
2461 (*current_target.to_program_signals) (&current_target,
2462 numsigs, program_signals);
2463 }
2464
/* Default to_follow_fork implementation; reaching it is a bug, since
   a target that reports fork events must know how to follow them.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2473
2474 /* Look through the list of possible targets for a target that can
2475 follow forks. */
2476
2477 int
2478 target_follow_fork (int follow_child, int detach_fork)
2479 {
2480 int retval = current_target.to_follow_fork (&current_target,
2481 follow_child, detach_fork);
2482
2483 if (targetdebug)
2484 fprintf_unfiltered (gdb_stdlog,
2485 "target_follow_fork (%d, %d) = %d\n",
2486 follow_child, detach_fork, retval);
2487 return retval;
2488 }
2489
/* Default to_mourn_inferior implementation; reaching it is a bug,
   since any target managing an inferior must know how to mourn it.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  /* The message previously read "... to follow mourn inferior", a
     copy-paste leftover from default_follow_fork.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to mourn inferior"));
}
2496
/* The inferior is gone (exited, killed, detached-after-death); let
   the current target clean up its per-inferior state.  */

void
target_mourn_inferior (void)
{
  current_target.to_mourn_inferior (&current_target);
  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
2509
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  return target->to_read_description (target);
}
2518
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).

   Returns 1 and sets *FOUND_ADDRP if PATTERN is found in the
   SEARCH_SPACE_LEN bytes starting at START_ADDR, 0 if not found, and
   -1 on a memory read error.  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* The extra pattern_len - 1 bytes let a match that straddles two
     chunks be found inside a single buffer scan.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  /* Plain malloc (not xmalloc) so a huge request fails gracefully
     with the error below instead of aborting GDB.  */
  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2625
/* Default implementation of memory-searching: read the memory via
   the target stack and scan it on the GDB side.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_target.beneath,
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}
2639
2640 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2641 sequence of bytes in PATTERN with length PATTERN_LEN.
2642
2643 The result is 1 if found, 0 if not found, and -1 if there was an error
2644 requiring halting of the search (e.g. memory read error).
2645 If the pattern is found the address is recorded in FOUND_ADDRP. */
2646
2647 int
2648 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2649 const gdb_byte *pattern, ULONGEST pattern_len,
2650 CORE_ADDR *found_addrp)
2651 {
2652 int found;
2653
2654 if (targetdebug)
2655 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2656 hex_string (start_addr));
2657
2658 found = current_target.to_search_memory (&current_target, start_addr,
2659 search_space_len,
2660 pattern, pattern_len, found_addrp);
2661
2662 if (targetdebug)
2663 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2664
2665 return found;
2666 }
2667
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      error (_("The \"%s\" target does not support \"run\".  "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2703
2704 /* Look through the list of possible targets for a target that can
2705 execute a run or attach command without any other data. This is
2706 used to locate the default process stratum.
2707
2708 If DO_MESG is not NULL, the result is always valid (error() is
2709 called for errors); else, return NULL on error. */
2710
2711 static struct target_ops *
2712 find_default_run_target (char *do_mesg)
2713 {
2714 struct target_ops **t;
2715 struct target_ops *runable = NULL;
2716 int count;
2717
2718 count = 0;
2719
2720 for (t = target_structs; t < target_structs + target_struct_size;
2721 ++t)
2722 {
2723 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2724 {
2725 runable = *t;
2726 ++count;
2727 }
2728 }
2729
2730 if (count != 1)
2731 {
2732 if (do_mesg)
2733 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2734 else
2735 return NULL;
2736 }
2737
2738 return runable;
2739 }
2740
2741 void
2742 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2743 {
2744 struct target_ops *t;
2745
2746 t = find_default_run_target ("attach");
2747 (t->to_attach) (t, args, from_tty);
2748 return;
2749 }
2750
2751 void
2752 find_default_create_inferior (struct target_ops *ops,
2753 char *exec_file, char *allargs, char **env,
2754 int from_tty)
2755 {
2756 struct target_ops *t;
2757
2758 t = find_default_run_target ("run");
2759 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2760 return;
2761 }
2762
/* Can-async predicate usable before any target is pushed: ask the
   default run target, if there is one.  */

static int
find_default_can_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_can_async_p != delegate_can_async_p)
    return (t->to_can_async_p) (t);
  return 0;
}
2777
/* Is-async predicate usable before any target is pushed: ask the
   default run target, if there is one.  */

static int
find_default_is_async_p (struct target_ops *ignore)
{
  struct target_ops *t;

  /* This may be called before the target is pushed on the stack;
     look for the default process stratum.  If there's none, gdb isn't
     configured with a native debugger, and target remote isn't
     connected yet.  */
  t = find_default_run_target (NULL);
  if (t && t->to_is_async_p != delegate_is_async_p)
    return (t->to_is_async_p) (t);
  return 0;
}
2792
/* Non-stop support predicate usable before any target is pushed: ask
   the default run target, if there is one and it fills the slot.  */

static int
find_default_supports_non_stop (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_non_stop)
    return (t->to_supports_non_stop) (t);
  return 0;
}
2803
2804 int
2805 target_supports_non_stop (void)
2806 {
2807 struct target_ops *t;
2808
2809 for (t = &current_target; t != NULL; t = t->beneath)
2810 if (t->to_supports_non_stop)
2811 return t->to_supports_non_stop (t);
2812
2813 return 0;
2814 }
2815
/* Implement the "info proc" command.  Returns 1 if some target
   handled the request, 0 if none did.  */

int
target_info_proc (char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target (NULL);

  /* Walk down the stack from T to the first target that implements
     to_info_proc.  T may be NULL here, in which case the loop body
     never runs.  */
  for (; t != NULL; t = t->beneath)
    {
      if (t->to_info_proc != NULL)
	{
	  t->to_info_proc (t, args, what);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  return 0;
}
2847
/* Disable-randomization predicate usable before any target is pushed:
   ask the default run target, if there is one and it fills the slot.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t && t->to_supports_disable_randomization)
    return (t->to_supports_disable_randomization) (t);
  return 0;
}
2858
2859 int
2860 target_supports_disable_randomization (void)
2861 {
2862 struct target_ops *t;
2863
2864 for (t = &current_target; t != NULL; t = t->beneath)
2865 if (t->to_supports_disable_randomization)
2866 return t->to_supports_disable_randomization (t);
2867
2868 return 0;
2869 }
2870
/* Fetch the OS data object of kind TYPE as an xmalloc'd string, or
   NULL if no suitable target is available.  Caller frees.  */

char *
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  if (current_target.to_stratum >= process_stratum)
    t = current_target.beneath;
  else
    t = find_default_run_target ("get OS data");

  if (!t)
    return NULL;

  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
2889
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* Ask the first target (top-down) that implements the method.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
2926
2927
2928 /* Target file operations. */
2929
2930 static struct target_ops *
2931 default_fileio_target (void)
2932 {
2933 /* If we're already connected to something that can perform
2934 file I/O, use it. Otherwise, try using the native target. */
2935 if (current_target.to_stratum >= process_stratum)
2936 return current_target.beneath;
2937 else
2938 return find_default_run_target ("file I/O");
2939 }
2940
/* Open FILENAME on the target, using FLAGS and MODE.  Return a
   target file descriptor, or -1 if an error occurs (and set
   *TARGET_ERRNO).  Delegates to the first target (top-down) that
   implements to_fileio_open.  */
int
target_fileio_open (const char *filename, int flags, int mode,
		    int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_open != NULL)
	{
	  int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
				filename, flags, mode,
				fd, fd != -1 ? 0 : *target_errno);
	  return fd;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
2968
/* Write up to LEN bytes from WRITE_BUF to FD on the target at OFFSET.
   Return the number of bytes written, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target (top-down)
   that implements to_fileio_pwrite.  */
int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pwrite != NULL)
	{
	  int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
					 target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pwrite (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
2998
/* Read up to LEN bytes from FD on the target at OFFSET into READ_BUF.
   Return the number of bytes read, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target (top-down)
   that implements to_fileio_pread.  */
int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_pread != NULL)
	{
	  int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
					target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_pread (%d,...,%d,%s) "
				"= %d (%d)\n",
				fd, len, pulongest (offset),
				ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3028
/* Close FD on the target.  Return 0, or -1 if an error occurs
   (and set *TARGET_ERRNO).  Delegates to the first target (top-down)
   that implements to_fileio_close.  */
int
target_fileio_close (int fd, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_close != NULL)
	{
	  int ret = t->to_fileio_close (t, fd, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_close (%d) = %d (%d)\n",
				fd, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3053
/* Unlink FILENAME on the target.  Return 0, or -1 if an error
   occurs (and set *TARGET_ERRNO).  Delegates to the first target
   (top-down) that implements to_fileio_unlink.  */
int
target_fileio_unlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_unlink != NULL)
	{
	  int ret = t->to_fileio_unlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_unlink (%s) = %d (%d)\n",
				filename, ret, ret != -1 ? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return -1;
}
3078
/* Read value of symbolic link FILENAME on the target.  Return a
   null-terminated string allocated via xmalloc, or NULL if an error
   occurs (and set *TARGET_ERRNO).  Caller frees.  Delegates to the
   first target (top-down) that implements to_fileio_readlink.  */
char *
target_fileio_readlink (const char *filename, int *target_errno)
{
  struct target_ops *t;

  for (t = default_fileio_target (); t != NULL; t = t->beneath)
    {
      if (t->to_fileio_readlink != NULL)
	{
	  char *ret = t->to_fileio_readlink (t, filename, target_errno);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_fileio_readlink (%s) = %s (%d)\n",
				filename, ret? ret : "(nil)",
				ret? 0 : *target_errno);
	  return ret;
	}
    }

  /* No target on the stack implements file I/O.  */
  *target_errno = FILEIO_ENOSYS;
  return NULL;
}
3105
/* Cleanup callback: close the target fileio descriptor that OPAQUE
   points at, ignoring any close error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int target_errno;
  int fd = *(int *) opaque;

  target_fileio_close (fd, &target_errno);
}
3114
/* Read target file FILENAME.  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for more
   information.  On error, returns -1 and *BUF_P is untouched.  */

static LONGEST
target_fileio_read_alloc_1 (const char *filename,
			    gdb_byte **buf_p, int padding)
{
  struct cleanup *close_cleanup;
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int fd;
  int target_errno;

  fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
  if (fd == -1)
    return -1;

  /* Ensure FD is closed on every exit path below.  */
  close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      n = target_fileio_pread (fd, &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  do_cleanups (close_cleanup);
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  On an empty file the buffer is freed
	     and *BUF_P is left unset; callers must check for a zero
	     return before using *BUF_P.  */
	  do_cleanups (close_cleanup);
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3178
/* Read target file FILENAME.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

LONGEST
target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
{
  /* Padding 0: the caller gets exactly the file's bytes.  */
  return target_fileio_read_alloc_1 (filename, buf_p, 0);
}
3188
3189 /* Read target file FILENAME. The result is NUL-terminated and
3190 returned as a string, allocated using xmalloc. If an error occurs
3191 or the transfer is unsupported, NULL is returned. Empty objects
3192 are returned as allocated but empty strings. A warning is issued
3193 if the result contains any embedded NUL bytes. */
3194
3195 char *
3196 target_fileio_read_stralloc (const char *filename)
3197 {
3198 gdb_byte *buffer;
3199 char *bufstr;
3200 LONGEST i, transferred;
3201
3202 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3203 bufstr = (char *) buffer;
3204
3205 if (transferred < 0)
3206 return NULL;
3207
3208 if (transferred == 0)
3209 return xstrdup ("");
3210
3211 bufstr[transferred] = 0;
3212
3213 /* Check for embedded NUL bytes; but allow trailing NULs. */
3214 for (i = strlen (bufstr); i < transferred; i++)
3215 if (bufstr[i] != 0)
3216 {
3217 warning (_("target file %s "
3218 "contained unexpected null characters"),
3219 filename);
3220 break;
3221 }
3222
3223 return bufstr;
3224 }
3225
3226
/* Default to_region_ok_for_hw_watchpoint: accept a region no wider
   than a pointer on the current architecture.  ADDR is unused.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}
3233
3234 static int
3235 default_watchpoint_addr_within_range (struct target_ops *target,
3236 CORE_ADDR addr,
3237 CORE_ADDR start, int length)
3238 {
3239 return addr >= start && addr < start + length;
3240 }
3241
/* Default to_thread_architecture: every thread shares the inferior's
   main architecture.  OPS and PTID are unused.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch ();
}
3247
/* Trivial helper that always returns zero.  NOTE(review): presumably
   installed as a default "no"/"none" answer in target vector slots —
   confirm against the uses elsewhere in this file.  */

static int
return_zero (void)
{
  return 0;
}
3253
/* Find the next target down the stack from the specified target T.  */

struct target_ops *
find_target_beneath (struct target_ops *t)
{
  return t->beneath;
}
3263
3264 /* See target.h. */
3265
3266 struct target_ops *
3267 find_target_at (enum strata stratum)
3268 {
3269 struct target_ops *t;
3270
3271 for (t = current_target.beneath; t != NULL; t = t->beneath)
3272 if (t->to_stratum == stratum)
3273 return t;
3274
3275 return NULL;
3276 }
3277
3278 \f
/* The inferior process has died.  Long live the inferior!  Shared
   teardown usable by any target's to_mourn_inferior method: clears
   inferior state, breakpoints, register caches and frame caches.  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3313 \f
3314 /* Convert a normal process ID to a string. Returns the string in a
3315 static buffer. */
3316
3317 char *
3318 normal_pid_to_str (ptid_t ptid)
3319 {
3320 static char buf[32];
3321
3322 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3323 return buf;
3324 }
3325
3326 static char *
3327 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3328 {
3329 return normal_pid_to_str (ptid);
3330 }
3331
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error () does not return.  The return pacifies the
     compiler.  */
  return 0;
}
3340
/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  /* Not reached: error () does not return.  The return pacifies the
     compiler.  */
  return NULL;
}
3349
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_supports_disable_randomization
    = find_default_supports_disable_randomization;
  dummy_target.to_stratum = dummy_stratum;
  /* The casts adapt the no-argument return_zero to the various
     slot-specific signatures; the extra arguments are never looked
     at.  NOTE(review): this relies on the platform calling convention
     tolerating the arity mismatch.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;

  /* Fill every remaining slot with a default method.  */
  install_dummy_methods (&dummy_target);
}
3374 \f
3375 static void
3376 debug_to_open (char *args, int from_tty)
3377 {
3378 debug_target.to_open (args, from_tty);
3379
3380 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3381 }
3382
3383 void
3384 target_close (struct target_ops *targ)
3385 {
3386 gdb_assert (!target_is_pushed (targ));
3387
3388 if (targ->to_xclose != NULL)
3389 targ->to_xclose (targ);
3390 else if (targ->to_close != NULL)
3391 targ->to_close (targ);
3392
3393 if (targetdebug)
3394 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3395 }
3396
3397 void
3398 target_attach (char *args, int from_tty)
3399 {
3400 current_target.to_attach (&current_target, args, from_tty);
3401 if (targetdebug)
3402 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3403 args, from_tty);
3404 }
3405
3406 int
3407 target_thread_alive (ptid_t ptid)
3408 {
3409 int retval;
3410
3411 retval = current_target.to_thread_alive (&current_target, ptid);
3412 if (targetdebug)
3413 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3414 ptid_get_pid (ptid), retval);
3415
3416 return retval;
3417 }
3418
3419 void
3420 target_find_new_threads (void)
3421 {
3422 current_target.to_find_new_threads (&current_target);
3423 if (targetdebug)
3424 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3425 }
3426
3427 void
3428 target_stop (ptid_t ptid)
3429 {
3430 if (!may_stop)
3431 {
3432 warning (_("May not interrupt or stop the target, ignoring attempt"));
3433 return;
3434 }
3435
3436 (*current_target.to_stop) (&current_target, ptid);
3437 }
3438
3439 static void
3440 debug_to_post_attach (struct target_ops *self, int pid)
3441 {
3442 debug_target.to_post_attach (&debug_target, pid);
3443
3444 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3445 }
3446
3447 /* Concatenate ELEM to LIST, a comma separate list, and return the
3448 result. The LIST incoming argument is released. */
3449
3450 static char *
3451 str_comma_list_concat_elem (char *list, const char *elem)
3452 {
3453 if (list == NULL)
3454 return xstrdup (elem);
3455 else
3456 return reconcat (list, list, ", ", elem, (char *) NULL);
3457 }
3458
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
	   int opt, char *opt_str)
{
  if ((*target_options & opt) == 0)
    return ret;

  *target_options &= ~opt;
  return str_comma_list_concat_elem (ret, opt_str);
}
3476
3477 char *
3478 target_options_to_string (int target_options)
3479 {
3480 char *ret = NULL;
3481
3482 #define DO_TARG_OPTION(OPT) \
3483 ret = do_option (&target_options, ret, OPT, #OPT)
3484
3485 DO_TARG_OPTION (TARGET_WNOHANG);
3486
3487 if (target_options != 0)
3488 ret = str_comma_list_concat_elem (ret, "unknown???");
3489
3490 if (ret == NULL)
3491 ret = xstrdup ("");
3492 return ret;
3493 }
3494
/* Write a trace line for a register access to gdb_stdlog: FUNC, the
   register's name (or number when it has no printable name), and, for
   raw registers, its bytes plus its integer value when the register
   fits in a LONGEST.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  /* Prefer the symbolic register name when one exists.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  /* Only raw registers (regno < num_regs) have bytes we can dump.  */
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      gdb_byte buf[MAX_REGISTER_SIZE];

      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      /* When the value fits in a LONGEST, also print it in hex and
	 decimal.  */
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3531
3532 void
3533 target_fetch_registers (struct regcache *regcache, int regno)
3534 {
3535 current_target.to_fetch_registers (&current_target, regcache, regno);
3536 if (targetdebug)
3537 debug_print_register ("target_fetch_registers", regcache, regno);
3538 }
3539
3540 void
3541 target_store_registers (struct regcache *regcache, int regno)
3542 {
3543 struct target_ops *t;
3544
3545 if (!may_write_registers)
3546 error (_("Writing to registers is not allowed (regno %d)"), regno);
3547
3548 current_target.to_store_registers (&current_target, regcache, regno);
3549 if (targetdebug)
3550 {
3551 debug_print_register ("target_store_registers", regcache, regno);
3552 }
3553 }
3554
3555 int
3556 target_core_of_thread (ptid_t ptid)
3557 {
3558 int retval = current_target.to_core_of_thread (&current_target, ptid);
3559
3560 if (targetdebug)
3561 fprintf_unfiltered (gdb_stdlog,
3562 "target_core_of_thread (%d) = %d\n",
3563 ptid_get_pid (ptid), retval);
3564 return retval;
3565 }
3566
3567 int
3568 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3569 {
3570 int retval = current_target.to_verify_memory (&current_target,
3571 data, memaddr, size);
3572
3573 if (targetdebug)
3574 fprintf_unfiltered (gdb_stdlog,
3575 "target_verify_memory (%s, %s) = %d\n",
3576 paddress (target_gdbarch (), memaddr),
3577 pulongest (size),
3578 retval);
3579 return retval;
3580 }
3581
3582 /* The documentation for this function is in its prototype declaration in
3583 target.h. */
3584
3585 int
3586 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3587 {
3588 int ret;
3589
3590 ret = current_target.to_insert_mask_watchpoint (&current_target,
3591 addr, mask, rw);
3592
3593 if (targetdebug)
3594 fprintf_unfiltered (gdb_stdlog, "\
3595 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3596 core_addr_to_string (addr),
3597 core_addr_to_string (mask), rw, ret);
3598
3599 return ret;
3600 }
3601
3602 /* The documentation for this function is in its prototype declaration in
3603 target.h. */
3604
3605 int
3606 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3607 {
3608 int ret;
3609
3610 ret = current_target.to_remove_mask_watchpoint (&current_target,
3611 addr, mask, rw);
3612
3613 if (targetdebug)
3614 fprintf_unfiltered (gdb_stdlog, "\
3615 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3616 core_addr_to_string (addr),
3617 core_addr_to_string (mask), rw, ret);
3618
3619 return ret;
3620 }
3621
3622 /* The documentation for this function is in its prototype declaration
3623 in target.h. */
3624
3625 int
3626 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3627 {
3628 return current_target.to_masked_watch_num_registers (&current_target,
3629 addr, mask);
3630 }
3631
3632 /* The documentation for this function is in its prototype declaration
3633 in target.h. */
3634
3635 int
3636 target_ranged_break_num_registers (void)
3637 {
3638 return current_target.to_ranged_break_num_registers (&current_target);
3639 }
3640
3641 /* See target.h. */
3642
3643 struct btrace_target_info *
3644 target_enable_btrace (ptid_t ptid)
3645 {
3646 return current_target.to_enable_btrace (&current_target, ptid);
3647 }
3648
3649 /* See target.h. */
3650
3651 void
3652 target_disable_btrace (struct btrace_target_info *btinfo)
3653 {
3654 current_target.to_disable_btrace (&current_target, btinfo);
3655 }
3656
3657 /* See target.h. */
3658
3659 void
3660 target_teardown_btrace (struct btrace_target_info *btinfo)
3661 {
3662 current_target.to_teardown_btrace (&current_target, btinfo);
3663 }
3664
3665 /* See target.h. */
3666
3667 enum btrace_error
3668 target_read_btrace (VEC (btrace_block_s) **btrace,
3669 struct btrace_target_info *btinfo,
3670 enum btrace_read_type type)
3671 {
3672 return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
3673 }
3674
3675 /* See target.h. */
3676
3677 void
3678 target_stop_recording (void)
3679 {
3680 current_target.to_stop_recording (&current_target);
3681 }
3682
3683 /* See target.h. */
3684
3685 void
3686 target_info_record (void)
3687 {
3688 struct target_ops *t;
3689
3690 for (t = current_target.beneath; t != NULL; t = t->beneath)
3691 if (t->to_info_record != NULL)
3692 {
3693 t->to_info_record (t);
3694 return;
3695 }
3696
3697 tcomplain ();
3698 }
3699
3700 /* See target.h. */
3701
3702 void
3703 target_save_record (const char *filename)
3704 {
3705 current_target.to_save_record (&current_target, filename);
3706 }
3707
3708 /* See target.h. */
3709
3710 int
3711 target_supports_delete_record (void)
3712 {
3713 struct target_ops *t;
3714
3715 for (t = current_target.beneath; t != NULL; t = t->beneath)
3716 if (t->to_delete_record != NULL)
3717 return 1;
3718
3719 return 0;
3720 }
3721
3722 /* See target.h. */
3723
3724 void
3725 target_delete_record (void)
3726 {
3727 current_target.to_delete_record (&current_target);
3728 }
3729
3730 /* See target.h. */
3731
3732 int
3733 target_record_is_replaying (void)
3734 {
3735 return current_target.to_record_is_replaying (&current_target);
3736 }
3737
3738 /* See target.h. */
3739
3740 void
3741 target_goto_record_begin (void)
3742 {
3743 current_target.to_goto_record_begin (&current_target);
3744 }
3745
3746 /* See target.h. */
3747
3748 void
3749 target_goto_record_end (void)
3750 {
3751 current_target.to_goto_record_end (&current_target);
3752 }
3753
3754 /* See target.h. */
3755
3756 void
3757 target_goto_record (ULONGEST insn)
3758 {
3759 current_target.to_goto_record (&current_target, insn);
3760 }
3761
3762 /* See target.h. */
3763
3764 void
3765 target_insn_history (int size, int flags)
3766 {
3767 current_target.to_insn_history (&current_target, size, flags);
3768 }
3769
3770 /* See target.h. */
3771
3772 void
3773 target_insn_history_from (ULONGEST from, int size, int flags)
3774 {
3775 current_target.to_insn_history_from (&current_target, from, size, flags);
3776 }
3777
3778 /* See target.h. */
3779
3780 void
3781 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3782 {
3783 current_target.to_insn_history_range (&current_target, begin, end, flags);
3784 }
3785
3786 /* See target.h. */
3787
3788 void
3789 target_call_history (int size, int flags)
3790 {
3791 current_target.to_call_history (&current_target, size, flags);
3792 }
3793
3794 /* See target.h. */
3795
3796 void
3797 target_call_history_from (ULONGEST begin, int size, int flags)
3798 {
3799 current_target.to_call_history_from (&current_target, begin, size, flags);
3800 }
3801
3802 /* See target.h. */
3803
3804 void
3805 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3806 {
3807 current_target.to_call_history_range (&current_target, begin, end, flags);
3808 }
3809
3810 static void
3811 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
3812 {
3813 debug_target.to_prepare_to_store (&debug_target, regcache);
3814
3815 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
3816 }
3817
3818 /* See target.h. */
3819
3820 const struct frame_unwind *
3821 target_get_unwinder (void)
3822 {
3823 struct target_ops *t;
3824
3825 for (t = current_target.beneath; t != NULL; t = t->beneath)
3826 if (t->to_get_unwinder != NULL)
3827 return t->to_get_unwinder;
3828
3829 return NULL;
3830 }
3831
3832 /* See target.h. */
3833
3834 const struct frame_unwind *
3835 target_get_tailcall_unwinder (void)
3836 {
3837 struct target_ops *t;
3838
3839 for (t = current_target.beneath; t != NULL; t = t->beneath)
3840 if (t->to_get_tailcall_unwinder != NULL)
3841 return t->to_get_tailcall_unwinder;
3842
3843 return NULL;
3844 }
3845
3846 /* See target.h. */
3847
3848 CORE_ADDR
3849 forward_target_decr_pc_after_break (struct target_ops *ops,
3850 struct gdbarch *gdbarch)
3851 {
3852 for (; ops != NULL; ops = ops->beneath)
3853 if (ops->to_decr_pc_after_break != NULL)
3854 return ops->to_decr_pc_after_break (ops, gdbarch);
3855
3856 return gdbarch_decr_pc_after_break (gdbarch);
3857 }
3858
3859 /* See target.h. */
3860
3861 CORE_ADDR
3862 target_decr_pc_after_break (struct gdbarch *gdbarch)
3863 {
3864 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
3865 }
3866
/* Logging wrapper for the deprecated xfer_memory method: do the
   transfer through DEBUG_TARGET, then print the request and, on
   success, the bytes transferred to gdb_stdlog.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch (), memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line whenever the buffer address is 16-byte
	     aligned.  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      /* Unless verbose debugging (targetdebug >= 2) was
		 requested, elide everything after the first line.  */
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3907
3908 static void
3909 debug_to_files_info (struct target_ops *target)
3910 {
3911 debug_target.to_files_info (target);
3912
3913 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
3914 }
3915
3916 static int
3917 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3918 struct bp_target_info *bp_tgt)
3919 {
3920 int retval;
3921
3922 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
3923
3924 fprintf_unfiltered (gdb_stdlog,
3925 "target_insert_breakpoint (%s, xxx) = %ld\n",
3926 core_addr_to_string (bp_tgt->placed_address),
3927 (unsigned long) retval);
3928 return retval;
3929 }
3930
3931 static int
3932 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
3933 struct bp_target_info *bp_tgt)
3934 {
3935 int retval;
3936
3937 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
3938
3939 fprintf_unfiltered (gdb_stdlog,
3940 "target_remove_breakpoint (%s, xxx) = %ld\n",
3941 core_addr_to_string (bp_tgt->placed_address),
3942 (unsigned long) retval);
3943 return retval;
3944 }
3945
3946 static int
3947 debug_to_can_use_hw_breakpoint (struct target_ops *self,
3948 int type, int cnt, int from_tty)
3949 {
3950 int retval;
3951
3952 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
3953 type, cnt, from_tty);
3954
3955 fprintf_unfiltered (gdb_stdlog,
3956 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
3957 (unsigned long) type,
3958 (unsigned long) cnt,
3959 (unsigned long) from_tty,
3960 (unsigned long) retval);
3961 return retval;
3962 }
3963
3964 static int
3965 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
3966 CORE_ADDR addr, int len)
3967 {
3968 CORE_ADDR retval;
3969
3970 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
3971 addr, len);
3972
3973 fprintf_unfiltered (gdb_stdlog,
3974 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3975 core_addr_to_string (addr), (unsigned long) len,
3976 core_addr_to_string (retval));
3977 return retval;
3978 }
3979
3980 static int
3981 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
3982 CORE_ADDR addr, int len, int rw,
3983 struct expression *cond)
3984 {
3985 int retval;
3986
3987 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
3988 addr, len,
3989 rw, cond);
3990
3991 fprintf_unfiltered (gdb_stdlog,
3992 "target_can_accel_watchpoint_condition "
3993 "(%s, %d, %d, %s) = %ld\n",
3994 core_addr_to_string (addr), len, rw,
3995 host_address_to_string (cond), (unsigned long) retval);
3996 return retval;
3997 }
3998
3999 static int
4000 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4001 {
4002 int retval;
4003
4004 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4005
4006 fprintf_unfiltered (gdb_stdlog,
4007 "target_stopped_by_watchpoint () = %ld\n",
4008 (unsigned long) retval);
4009 return retval;
4010 }
4011
4012 static int
4013 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4014 {
4015 int retval;
4016
4017 retval = debug_target.to_stopped_data_address (target, addr);
4018
4019 fprintf_unfiltered (gdb_stdlog,
4020 "target_stopped_data_address ([%s]) = %ld\n",
4021 core_addr_to_string (*addr),
4022 (unsigned long)retval);
4023 return retval;
4024 }
4025
4026 static int
4027 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4028 CORE_ADDR addr,
4029 CORE_ADDR start, int length)
4030 {
4031 int retval;
4032
4033 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4034 start, length);
4035
4036 fprintf_filtered (gdb_stdlog,
4037 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4038 core_addr_to_string (addr), core_addr_to_string (start),
4039 length, retval);
4040 return retval;
4041 }
4042
4043 static int
4044 debug_to_insert_hw_breakpoint (struct target_ops *self,
4045 struct gdbarch *gdbarch,
4046 struct bp_target_info *bp_tgt)
4047 {
4048 int retval;
4049
4050 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4051 gdbarch, bp_tgt);
4052
4053 fprintf_unfiltered (gdb_stdlog,
4054 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4055 core_addr_to_string (bp_tgt->placed_address),
4056 (unsigned long) retval);
4057 return retval;
4058 }
4059
4060 static int
4061 debug_to_remove_hw_breakpoint (struct target_ops *self,
4062 struct gdbarch *gdbarch,
4063 struct bp_target_info *bp_tgt)
4064 {
4065 int retval;
4066
4067 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4068 gdbarch, bp_tgt);
4069
4070 fprintf_unfiltered (gdb_stdlog,
4071 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4072 core_addr_to_string (bp_tgt->placed_address),
4073 (unsigned long) retval);
4074 return retval;
4075 }
4076
4077 static int
4078 debug_to_insert_watchpoint (struct target_ops *self,
4079 CORE_ADDR addr, int len, int type,
4080 struct expression *cond)
4081 {
4082 int retval;
4083
4084 retval = debug_target.to_insert_watchpoint (&debug_target,
4085 addr, len, type, cond);
4086
4087 fprintf_unfiltered (gdb_stdlog,
4088 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4089 core_addr_to_string (addr), len, type,
4090 host_address_to_string (cond), (unsigned long) retval);
4091 return retval;
4092 }
4093
4094 static int
4095 debug_to_remove_watchpoint (struct target_ops *self,
4096 CORE_ADDR addr, int len, int type,
4097 struct expression *cond)
4098 {
4099 int retval;
4100
4101 retval = debug_target.to_remove_watchpoint (&debug_target,
4102 addr, len, type, cond);
4103
4104 fprintf_unfiltered (gdb_stdlog,
4105 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4106 core_addr_to_string (addr), len, type,
4107 host_address_to_string (cond), (unsigned long) retval);
4108 return retval;
4109 }
4110
4111 static void
4112 debug_to_terminal_init (struct target_ops *self)
4113 {
4114 debug_target.to_terminal_init (&debug_target);
4115
4116 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4117 }
4118
4119 static void
4120 debug_to_terminal_inferior (struct target_ops *self)
4121 {
4122 debug_target.to_terminal_inferior (&debug_target);
4123
4124 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4125 }
4126
4127 static void
4128 debug_to_terminal_ours_for_output (struct target_ops *self)
4129 {
4130 debug_target.to_terminal_ours_for_output (&debug_target);
4131
4132 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4133 }
4134
4135 static void
4136 debug_to_terminal_ours (struct target_ops *self)
4137 {
4138 debug_target.to_terminal_ours (&debug_target);
4139
4140 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4141 }
4142
4143 static void
4144 debug_to_terminal_save_ours (struct target_ops *self)
4145 {
4146 debug_target.to_terminal_save_ours (&debug_target);
4147
4148 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4149 }
4150
4151 static void
4152 debug_to_terminal_info (struct target_ops *self,
4153 const char *arg, int from_tty)
4154 {
4155 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4156
4157 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4158 from_tty);
4159 }
4160
4161 static void
4162 debug_to_load (struct target_ops *self, char *args, int from_tty)
4163 {
4164 debug_target.to_load (&debug_target, args, from_tty);
4165
4166 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4167 }
4168
4169 static void
4170 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4171 {
4172 debug_target.to_post_startup_inferior (&debug_target, ptid);
4173
4174 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4175 ptid_get_pid (ptid));
4176 }
4177
4178 static int
4179 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4180 {
4181 int retval;
4182
4183 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4184
4185 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4186 pid, retval);
4187
4188 return retval;
4189 }
4190
4191 static int
4192 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4193 {
4194 int retval;
4195
4196 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4197
4198 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4199 pid, retval);
4200
4201 return retval;
4202 }
4203
4204 static int
4205 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4206 {
4207 int retval;
4208
4209 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4210
4211 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4212 pid, retval);
4213
4214 return retval;
4215 }
4216
4217 static int
4218 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4219 {
4220 int retval;
4221
4222 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4223
4224 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4225 pid, retval);
4226
4227 return retval;
4228 }
4229
4230 static int
4231 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4232 {
4233 int retval;
4234
4235 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4236
4237 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4238 pid, retval);
4239
4240 return retval;
4241 }
4242
4243 static int
4244 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4245 {
4246 int retval;
4247
4248 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4249
4250 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4251 pid, retval);
4252
4253 return retval;
4254 }
4255
4256 static int
4257 debug_to_has_exited (struct target_ops *self,
4258 int pid, int wait_status, int *exit_status)
4259 {
4260 int has_exited;
4261
4262 has_exited = debug_target.to_has_exited (&debug_target,
4263 pid, wait_status, exit_status);
4264
4265 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4266 pid, wait_status, *exit_status, has_exited);
4267
4268 return has_exited;
4269 }
4270
4271 static int
4272 debug_to_can_run (struct target_ops *self)
4273 {
4274 int retval;
4275
4276 retval = debug_target.to_can_run (&debug_target);
4277
4278 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4279
4280 return retval;
4281 }
4282
4283 static struct gdbarch *
4284 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4285 {
4286 struct gdbarch *retval;
4287
4288 retval = debug_target.to_thread_architecture (ops, ptid);
4289
4290 fprintf_unfiltered (gdb_stdlog,
4291 "target_thread_architecture (%s) = %s [%s]\n",
4292 target_pid_to_str (ptid),
4293 host_address_to_string (retval),
4294 gdbarch_bfd_arch_info (retval)->printable_name);
4295 return retval;
4296 }
4297
4298 static void
4299 debug_to_stop (struct target_ops *self, ptid_t ptid)
4300 {
4301 debug_target.to_stop (&debug_target, ptid);
4302
4303 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4304 target_pid_to_str (ptid));
4305 }
4306
4307 static void
4308 debug_to_rcmd (struct target_ops *self, char *command,
4309 struct ui_file *outbuf)
4310 {
4311 debug_target.to_rcmd (&debug_target, command, outbuf);
4312 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4313 }
4314
4315 static char *
4316 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
4317 {
4318 char *exec_file;
4319
4320 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
4321
4322 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4323 pid, exec_file);
4324
4325 return exec_file;
4326 }
4327
/* Save a copy of the current target vector in DEBUG_TARGET, then
   interpose the debug_to_* logging wrappers above into
   CURRENT_TARGET, so every call through the slots below is traced to
   gdb_stdlog before being forwarded to the saved vector.  */

static void
setup_target_debug (void)
{
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4375 \f
4376
/* Help text for the target-stack info command.  NOTE(review): the
   command registration using this string is outside this chunk --
   confirm against the file's _initialize function.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4381
/* Default implementation of to_rcmd: no monitor support; throw.  */

static void
default_rcmd (struct target_ops *self, char *command, struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
4387
/* Implementation of the "monitor" command: forward CMD to the
   target's rcmd method, with output to gdb_stdtarg.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
4394
4395 /* Print the name of each layers of our target stack. */
4396
4397 static void
4398 maintenance_print_target_stack (char *cmd, int from_tty)
4399 {
4400 struct target_ops *t;
4401
4402 printf_filtered (_("The current target stack is:\n"));
4403
4404 for (t = target_stack; t != NULL; t = t->beneath)
4405 {
4406 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4407 }
4408 }
4409
/* Controls if async mode is permitted.  Non-zero means the target may
   be operated asynchronously.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated; the new value
   takes effect once no inferior is running (see
   set_target_async_command).  */
static int target_async_permitted_1 = 0;
4416
4417 static void
4418 set_target_async_command (char *args, int from_tty,
4419 struct cmd_list_element *c)
4420 {
4421 if (have_live_inferiors ())
4422 {
4423 target_async_permitted_1 = target_async_permitted;
4424 error (_("Cannot change this setting while the inferior is running."));
4425 }
4426
4427 target_async_permitted = target_async_permitted_1;
4428 }
4429
/* The "show target-async" callback: print the current setting
   (VALUE, already formatted by the command machinery) to FILE.  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c,
			   const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4439
/* Temporary copies of permission settings.  The "set may-*" commands
   write here; the real may_* flags are only updated from these once
   it is safe to do so (see set_target_permissions and
   set_write_memory_permission).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4448
4449 /* Make the user-set values match the real values again. */
4450
4451 void
4452 update_target_permissions (void)
4453 {
4454 may_write_registers_1 = may_write_registers;
4455 may_write_memory_1 = may_write_memory;
4456 may_insert_breakpoints_1 = may_insert_breakpoints;
4457 may_insert_tracepoints_1 = may_insert_tracepoints;
4458 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4459 may_stop_1 = may_stop;
4460 }
4461
4462 /* The one function handles (most of) the permission flags in the same
4463 way. */
4464
4465 static void
4466 set_target_permissions (char *args, int from_tty,
4467 struct cmd_list_element *c)
4468 {
4469 if (target_has_execution)
4470 {
4471 update_target_permissions ();
4472 error (_("Cannot change this setting while the inferior is running."));
4473 }
4474
4475 /* Make the real values match the user-changed values. */
4476 may_write_registers = may_write_registers_1;
4477 may_insert_breakpoints = may_insert_breakpoints_1;
4478 may_insert_tracepoints = may_insert_tracepoints_1;
4479 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4480 may_stop = may_stop_1;
4481 update_observer_mode ();
4482 }
4483
/* Set memory write permission independently of observer mode.
   Unlike set_target_permissions, this intentionally performs no
   running-inferior check, so the flag can be flipped at any time.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4494
4495
/* Install the initial (dummy) target and register all generic
   target-related commands and settings.  Called once at startup.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack so there is
     always a current target.  */
  init_dummy_target ();
  push_target (&dummy_target);

  /* "info target" and "info files" are aliases for the same info.  */
  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  /* Debug/diagnostic knobs.  */
  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			     NULL,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  /* User commands.  */
  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  /* Async-mode control; note the set side writes to the shadow
     variable target_async_permitted_1.  */
  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_target_async_command,
			   show_target_async_command,
			   &setlist,
			   &showlist);

  /* Permission flags.  Each "set" writes to a shadow (_1) variable;
     the callbacks decide when to apply it.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  /* Memory writes use a dedicated callback so the permission is
     independent of observer mode.  */
  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);
}
This page took 0.252314 seconds and 5 git commands to generate.