1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46
47 static void target_info (char *, int);
48
49 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
50
51 static void default_terminal_info (struct target_ops *, const char *, int);
52
53 static int default_watchpoint_addr_within_range (struct target_ops *,
54 CORE_ADDR, CORE_ADDR, int);
55
56 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
57 CORE_ADDR, int);
58
59 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
60
61 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
62 long lwp, long tid);
63
64 static int default_follow_fork (struct target_ops *self, int follow_child,
65 int detach_fork);
66
67 static void default_mourn_inferior (struct target_ops *self);
68
69 static int default_search_memory (struct target_ops *ops,
70 CORE_ADDR start_addr,
71 ULONGEST search_space_len,
72 const gdb_byte *pattern,
73 ULONGEST pattern_len,
74 CORE_ADDR *found_addrp);
75
76 static int default_verify_memory (struct target_ops *self,
77 const gdb_byte *data,
78 CORE_ADDR memaddr, ULONGEST size);
79
80 static struct address_space *default_thread_address_space
81 (struct target_ops *self, ptid_t ptid);
82
83 static void tcomplain (void) ATTRIBUTE_NORETURN;
84
85 static int return_zero (struct target_ops *);
86
87 static int return_zero_has_execution (struct target_ops *, ptid_t);
88
89 static void target_command (char *, int);
90
91 static struct target_ops *find_default_run_target (char *);
92
93 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
94 ptid_t ptid);
95
96 static int dummy_find_memory_regions (struct target_ops *self,
97 find_memory_region_ftype ignore1,
98 void *ignore2);
99
100 static char *dummy_make_corefile_notes (struct target_ops *self,
101 bfd *ignore1, int *ignore2);
102
103 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
104
105 static enum exec_direction_kind default_execution_direction
106 (struct target_ops *self);
107
108 static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
109 struct gdbarch *gdbarch);
110
111 static struct target_ops debug_target;
112
113 #include "target-delegates.c"
114
115 static void init_dummy_target (void);
116
117 static void update_current_target (void);
118
119 /* Vector of existing target structures. */
120 typedef struct target_ops *target_ops_p;
121 DEF_VEC_P (target_ops_p);
122 static VEC (target_ops_p) *target_structs;
123
124 /* The initial current target, so that there is always a semi-valid
125 current target. */
126
127 static struct target_ops dummy_target;
128
129 /* Top of target stack. */
130
131 static struct target_ops *target_stack;
132
133 /* The target structure we are currently using to talk to a process
134 or file or whatever "inferior" we have. */
135
136 struct target_ops current_target;
137
138 /* Command list for target. */
139
140 static struct cmd_list_element *targetlist = NULL;
141
142 /* Nonzero if we should trust readonly sections from the
143 executable when reading memory. */
144
145 static int trust_readonly = 0;
146
147 /* Nonzero if we should show true memory content including
148 memory breakpoints inserted by GDB. */
149
150 static int show_memory_breakpoints = 0;
151
152 /* These globals control whether GDB attempts to perform these
153 operations; they are useful for targets that need to prevent
154 inadvertent disruption, such as in non-stop mode. */
155
156 int may_write_registers = 1;
157
158 int may_write_memory = 1;
159
160 int may_insert_breakpoints = 1;
161
162 int may_insert_tracepoints = 1;
163
164 int may_insert_fast_tracepoints = 1;
165
166 int may_stop = 1;
167
168 /* Non-zero if we want to see tracing of target-level operations. */
169
170 static unsigned int targetdebug = 0;
171
172 static void
173 set_targetdebug (char *args, int from_tty, struct cmd_list_element *c)
174 {
175 update_current_target ();
176 }
177
178 static void
179 show_targetdebug (struct ui_file *file, int from_tty,
180 struct cmd_list_element *c, const char *value)
181 {
182 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
183 }
184
185 static void setup_target_debug (void);
186
187 /* The user just typed 'target' without the name of a target. */
188
189 static void
190 target_command (char *arg, int from_tty)
191 {
192 fputs_filtered ("Argument required (target name). Try `help target'\n",
193 gdb_stdout);
194 }
195
196 /* Default target_has_* methods for process_stratum targets. */
197
198 int
199 default_child_has_all_memory (struct target_ops *ops)
200 {
201 /* If no inferior selected, then we can't read memory here. */
202 if (ptid_equal (inferior_ptid, null_ptid))
203 return 0;
204
205 return 1;
206 }
207
208 int
209 default_child_has_memory (struct target_ops *ops)
210 {
211 /* If no inferior selected, then we can't read memory here. */
212 if (ptid_equal (inferior_ptid, null_ptid))
213 return 0;
214
215 return 1;
216 }
217
218 int
219 default_child_has_stack (struct target_ops *ops)
220 {
221 /* If no inferior selected, there's no stack. */
222 if (ptid_equal (inferior_ptid, null_ptid))
223 return 0;
224
225 return 1;
226 }
227
228 int
229 default_child_has_registers (struct target_ops *ops)
230 {
231 /* Can't read registers from no inferior. */
232 if (ptid_equal (inferior_ptid, null_ptid))
233 return 0;
234
235 return 1;
236 }
237
238 int
239 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
240 {
241 /* If there's no thread selected, then we can't make it run through
242 hoops. */
243 if (ptid_equal (the_ptid, null_ptid))
244 return 0;
245
246 return 1;
247 }
248
249
250 int
251 target_has_all_memory_1 (void)
252 {
253 struct target_ops *t;
254
255 for (t = current_target.beneath; t != NULL; t = t->beneath)
256 if (t->to_has_all_memory (t))
257 return 1;
258
259 return 0;
260 }
261
262 int
263 target_has_memory_1 (void)
264 {
265 struct target_ops *t;
266
267 for (t = current_target.beneath; t != NULL; t = t->beneath)
268 if (t->to_has_memory (t))
269 return 1;
270
271 return 0;
272 }
273
274 int
275 target_has_stack_1 (void)
276 {
277 struct target_ops *t;
278
279 for (t = current_target.beneath; t != NULL; t = t->beneath)
280 if (t->to_has_stack (t))
281 return 1;
282
283 return 0;
284 }
285
286 int
287 target_has_registers_1 (void)
288 {
289 struct target_ops *t;
290
291 for (t = current_target.beneath; t != NULL; t = t->beneath)
292 if (t->to_has_registers (t))
293 return 1;
294
295 return 0;
296 }
297
298 int
299 target_has_execution_1 (ptid_t the_ptid)
300 {
301 struct target_ops *t;
302
303 for (t = current_target.beneath; t != NULL; t = t->beneath)
304 if (t->to_has_execution (t, the_ptid))
305 return 1;
306
307 return 0;
308 }
309
310 int
311 target_has_execution_current (void)
312 {
313 return target_has_execution_1 (inferior_ptid);
314 }
315
316 /* Complete initialization of T. This ensures that various fields in
317 T are set, if needed by the target implementation. */
318
319 void
320 complete_target_initialization (struct target_ops *t)
321 {
322 /* Provide default values for all "must have" methods. */
323
324 if (t->to_has_all_memory == NULL)
325 t->to_has_all_memory = return_zero;
326
327 if (t->to_has_memory == NULL)
328 t->to_has_memory = return_zero;
329
330 if (t->to_has_stack == NULL)
331 t->to_has_stack = return_zero;
332
333 if (t->to_has_registers == NULL)
334 t->to_has_registers = return_zero;
335
336 if (t->to_has_execution == NULL)
337 t->to_has_execution = return_zero_has_execution;
338
339 /* These methods can be called on an unpushed target and so require
340 a default implementation if the target might plausibly be the
341 default run target. */
342 gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL
343 && t->to_supports_non_stop != NULL));
344
345 install_delegators (t);
346 }
347
348 /* This is used to implement the various target commands. */
349
350 static void
351 open_target (char *args, int from_tty, struct cmd_list_element *command)
352 {
353 struct target_ops *ops = get_cmd_context (command);
354
355 if (targetdebug)
356 fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n",
357 ops->to_shortname);
358
359 ops->to_open (args, from_tty);
360
361 if (targetdebug)
362 fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n",
363 ops->to_shortname, args, from_tty);
364 }
365
366 /* Add possible target architecture T to the list and add a new
367 command 'target T->to_shortname'. Set COMPLETER as the command's
368 completer if not NULL. */
369
370 void
371 add_target_with_completer (struct target_ops *t,
372 completer_ftype *completer)
373 {
374 struct cmd_list_element *c;
375
376 complete_target_initialization (t);
377
378 VEC_safe_push (target_ops_p, target_structs, t);
379
380 if (targetlist == NULL)
381 add_prefix_cmd ("target", class_run, target_command, _("\
382 Connect to a target machine or process.\n\
383 The first argument is the type or protocol of the target machine.\n\
384 Remaining arguments are interpreted by the target protocol. For more\n\
385 information on the arguments for a particular protocol, type\n\
386 `help target ' followed by the protocol name."),
387 &targetlist, "target ", 0, &cmdlist);
388 c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist);
389 set_cmd_sfunc (c, open_target);
390 set_cmd_context (c, t);
391 if (completer != NULL)
392 set_cmd_completer (c, completer);
393 }
394
395 /* Add a possible target architecture to the list. */
396
397 void
398 add_target (struct target_ops *t)
399 {
400 add_target_with_completer (t, NULL);
401 }
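/* Example (illustrative; the "foo" names are hypothetical): a backend
   typically registers itself once from its _initialize routine:

     static struct target_ops foo_ops;

     void
     _initialize_foo (void)
     {
       foo_ops.to_shortname = "foo";
       foo_ops.to_longname = "Foo remote target";
       foo_ops.to_doc = "Connect to a foo device.";
       foo_ops.to_open = foo_open;
       foo_ops.to_magic = OPS_MAGIC;
       add_target (&foo_ops);
     }

   Afterwards "target foo ARGS" dispatches to foo_open through
   open_target above.  */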
402
403 /* See target.h. */
404
405 void
406 add_deprecated_target_alias (struct target_ops *t, char *alias)
407 {
408 struct cmd_list_element *c;
409 char *alt;
410
411 /* If we used add_alias_cmd here, we would not get the deprecated warning;
412 see PR cli/15104. */
413 c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist);
414 set_cmd_sfunc (c, open_target);
415 set_cmd_context (c, t);
416 alt = xstrprintf ("target %s", t->to_shortname);
417 deprecate_cmd (c, alt);
418 }
419
420 /* Stub functions */
421
422 void
423 target_kill (void)
424 {
425 current_target.to_kill (&current_target);
426 }
427
428 void
429 target_load (const char *arg, int from_tty)
430 {
431 target_dcache_invalidate ();
432 (*current_target.to_load) (&current_target, arg, from_tty);
433 }
434
435 /* Possible terminal states. */
436
437 enum terminal_state
438 {
439 /* The inferior's terminal settings are in effect. */
440 terminal_is_inferior = 0,
441
442 /* Some of our terminal settings are in effect, enough to get
443 proper output. */
444 terminal_is_ours_for_output = 1,
445
446 /* Our terminal settings are in effect, for output and input. */
447 terminal_is_ours = 2
448 };
449
450 static enum terminal_state terminal_state;
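/* Illustrative life cycle of these states: target_terminal_inferior
   is called just before resuming the inferior in the foreground;
   target_terminal_ours_for_output when GDB must print while the
   inferior runs; and target_terminal_ours before handing the
   terminal back to the user for input, e.g.:

     target_terminal_inferior ();         (resume; inferior owns the tty)
     ... inferior stops ...
     target_terminal_ours_for_output ();  (safe to print the stop message)
     target_terminal_ours ();             (read the next command)
*/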
451
452 /* See target.h. */
453
454 void
455 target_terminal_init (void)
456 {
457 (*current_target.to_terminal_init) (&current_target);
458
459 terminal_state = terminal_is_ours;
460 }
461
462 /* See target.h. */
463
464 int
465 target_terminal_is_inferior (void)
466 {
467 return (terminal_state == terminal_is_inferior);
468 }
469
470 /* See target.h. */
471
472 void
473 target_terminal_inferior (void)
474 {
475 /* A background resume (``run&'') should leave GDB in control of the
476 terminal. Use target_can_async_p, not target_is_async_p, since at
477 this point the target is not async yet. However, if sync_execution
478 is not set, we know it will become async prior to resume. */
479 if (target_can_async_p () && !sync_execution)
480 return;
481
482 if (terminal_state == terminal_is_inferior)
483 return;
484
485 /* If GDB is resuming the inferior in the foreground, install
486 inferior's terminal modes. */
487 (*current_target.to_terminal_inferior) (&current_target);
488 terminal_state = terminal_is_inferior;
489 }
490
491 /* See target.h. */
492
493 void
494 target_terminal_ours (void)
495 {
496 if (terminal_state == terminal_is_ours)
497 return;
498
499 (*current_target.to_terminal_ours) (&current_target);
500 terminal_state = terminal_is_ours;
501 }
502
503 /* See target.h. */
504
505 void
506 target_terminal_ours_for_output (void)
507 {
508 if (terminal_state != terminal_is_inferior)
509 return;
510 (*current_target.to_terminal_ours_for_output) (&current_target);
511 terminal_state = terminal_is_ours_for_output;
512 }
513
514 /* See target.h. */
515
516 int
517 target_supports_terminal_ours (void)
518 {
519 struct target_ops *t;
520
521 for (t = current_target.beneath; t != NULL; t = t->beneath)
522 {
523 if (t->to_terminal_ours != delegate_terminal_ours
524 && t->to_terminal_ours != tdefault_terminal_ours)
525 return 1;
526 }
527
528 return 0;
529 }
530
531 static void
532 tcomplain (void)
533 {
534 error (_("You can't do that when your target is `%s'"),
535 current_target.to_shortname);
536 }
537
538 void
539 noprocess (void)
540 {
541 error (_("You can't do that without a process to debug."));
542 }
543
544 static void
545 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
546 {
547 printf_unfiltered (_("No saved terminal information.\n"));
548 }
549
550 /* A default implementation for the to_get_ada_task_ptid target method.
551
552 This function builds the PTID by using both LWP and TID as part of
553 the PTID lwp and tid elements. The pid used is the pid of the
554 inferior_ptid. */
555
556 static ptid_t
557 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
558 {
559 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
560 }
561
562 static enum exec_direction_kind
563 default_execution_direction (struct target_ops *self)
564 {
565 if (!target_can_execute_reverse)
566 return EXEC_FORWARD;
567 else if (!target_can_async_p ())
568 return EXEC_FORWARD;
569 else
570 gdb_assert_not_reached ("\
571 to_execution_direction must be implemented for reverse async");
572 }
573
574 /* Go through the target stack from top to bottom, copying over zero
575 entries in current_target, then filling in still empty entries. In
576 effect, we are doing class inheritance through the pushed target
577 vectors.
578
579 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
580 is currently implemented, is that it discards any knowledge of
581 which target an inherited method originally belonged to.
582 Consequently, new target methods should instead explicitly and
583 locally search the target stack for the target that can handle the
584 request. */
585
586 static void
587 update_current_target (void)
588 {
589 struct target_ops *t;
590
591 /* First, reset current's contents. */
592 memset (&current_target, 0, sizeof (current_target));
593
594 /* Install the delegators. */
595 install_delegators (&current_target);
596
597 current_target.to_stratum = target_stack->to_stratum;
598
599 #define INHERIT(FIELD, TARGET) \
600 if (!current_target.FIELD) \
601 current_target.FIELD = (TARGET)->FIELD
602
603 /* Do not add any new INHERITs here. Instead, use the delegation
604 mechanism provided by make-target-delegates. */
605 for (t = target_stack; t; t = t->beneath)
606 {
607 INHERIT (to_shortname, t);
608 INHERIT (to_longname, t);
609 INHERIT (to_attach_no_wait, t);
610 INHERIT (to_have_steppable_watchpoint, t);
611 INHERIT (to_have_continuable_watchpoint, t);
612 INHERIT (to_has_thread_control, t);
613 }
614 #undef INHERIT
615
616 /* Finally, position the target-stack beneath the squashed
617 "current_target". That way code looking for a non-inherited
618 target method can quickly and simply find it. */
619 current_target.beneath = target_stack;
620
621 if (targetdebug)
622 setup_target_debug ();
623 }
624
625 /* Push a new target type into the stack of the existing target accessors,
626 possibly superseding some of the existing accessors.
627
628 Rather than allow an empty stack, we always have the dummy target at
629 the bottom stratum, so we can call the function vectors without
630 checking them. */
631
632 void
633 push_target (struct target_ops *t)
634 {
635 struct target_ops **cur;
636
637 /* Check magic number. If wrong, it probably means someone changed
638 the struct definition, but not all the places that initialize one. */
639 if (t->to_magic != OPS_MAGIC)
640 {
641 fprintf_unfiltered (gdb_stderr,
642 "Magic number of %s target struct wrong\n",
643 t->to_shortname);
644 internal_error (__FILE__, __LINE__,
645 _("failed internal consistency check"));
646 }
647
648 /* Find the proper stratum to install this target in. */
649 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
650 {
651 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
652 break;
653 }
654
655 /* If there are already targets at this stratum, remove them. */
656 /* FIXME: cagney/2003-10-15: I think this should be popping all
657 targets to CUR, and not just those at this stratum level. */
658 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
659 {
660 /* There's already something at this stratum level. Close it,
661 and un-hook it from the stack. */
662 struct target_ops *tmp = (*cur);
663
664 (*cur) = (*cur)->beneath;
665 tmp->beneath = NULL;
666 target_close (tmp);
667 }
668
669 /* We have removed all targets in our stratum, now add the new one. */
670 t->beneath = (*cur);
671 (*cur) = t;
672
673 update_current_target ();
674 }
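/* Example (illustrative): on a native configuration, after "file" and
   "run" the stack could look like this, top to bottom:

     process_stratum:  the native target (registers, execution, ...)
     file_stratum:     the exec target (read-only sections of the file)
     dummy_stratum:    dummy_target

   Pushing another process_stratum target replaces the existing entry
   at that stratum rather than stacking on top of it, per the loop
   above.  */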
675
676 /* Remove a target_ops vector from the stack, wherever it may be.
677 Return how many times it was removed (0 or 1). */
678
679 int
680 unpush_target (struct target_ops *t)
681 {
682 struct target_ops **cur;
683 struct target_ops *tmp;
684
685 if (t->to_stratum == dummy_stratum)
686 internal_error (__FILE__, __LINE__,
687 _("Attempt to unpush the dummy target"));
688
689 /* Look for the specified target. Note that we assume that a target
690 can only occur once in the target stack. */
691
692 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
693 {
694 if ((*cur) == t)
695 break;
696 }
697
698 /* If we don't find target_ops, quit. Only open targets should be
699 closed. */
700 if ((*cur) == NULL)
701 return 0;
702
703 /* Unchain the target. */
704 tmp = (*cur);
705 (*cur) = (*cur)->beneath;
706 tmp->beneath = NULL;
707
708 update_current_target ();
709
710 /* Finally close the target. Note we do this after unchaining, so
711 any target method calls from within the target_close
712 implementation don't end up in T anymore. */
713 target_close (t);
714
715 return 1;
716 }
717
718 void
719 pop_all_targets_above (enum strata above_stratum)
720 {
721 while ((int) (current_target.to_stratum) > (int) above_stratum)
722 {
723 if (!unpush_target (target_stack))
724 {
725 fprintf_unfiltered (gdb_stderr,
726 "pop_all_targets couldn't find target %s\n",
727 target_stack->to_shortname);
728 internal_error (__FILE__, __LINE__,
729 _("failed internal consistency check"));
730 break;
731 }
732 }
733 }
734
735 void
736 pop_all_targets (void)
737 {
738 pop_all_targets_above (dummy_stratum);
739 }
740
741 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
742
743 int
744 target_is_pushed (struct target_ops *t)
745 {
746 struct target_ops *cur;
747
748 /* Check magic number. If wrong, it probably means someone changed
749 the struct definition, but not all the places that initialize one. */
750 if (t->to_magic != OPS_MAGIC)
751 {
752 fprintf_unfiltered (gdb_stderr,
753 "Magic number of %s target struct wrong\n",
754 t->to_shortname);
755 internal_error (__FILE__, __LINE__,
756 _("failed internal consistency check"));
757 }
758
759 for (cur = target_stack; cur != NULL; cur = cur->beneath)
760 if (cur == t)
761 return 1;
762
763 return 0;
764 }
765
766 /* Default implementation of to_get_thread_local_address. */
767
768 static void
769 generic_tls_error (void)
770 {
771 throw_error (TLS_GENERIC_ERROR,
772 _("Cannot find thread-local variables on this target"));
773 }
774
775 /* Using the objfile specified in OBJFILE, find the address for the
776 current thread's thread-local storage with offset OFFSET. */
777 CORE_ADDR
778 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
779 {
780 volatile CORE_ADDR addr = 0;
781 struct target_ops *target = &current_target;
782
783 if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
784 {
785 ptid_t ptid = inferior_ptid;
786 volatile struct gdb_exception ex;
787
788 TRY_CATCH (ex, RETURN_MASK_ALL)
789 {
790 CORE_ADDR lm_addr;
791
792 /* Fetch the load module address for this objfile. */
793 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
794 objfile);
795
796 addr = target->to_get_thread_local_address (target, ptid,
797 lm_addr, offset);
798 }
799 /* If an error occurred, print TLS related messages here. Otherwise,
800 throw the error to some higher catcher. */
801 if (ex.reason < 0)
802 {
803 int objfile_is_library = (objfile->flags & OBJF_SHARED);
804
805 switch (ex.error)
806 {
807 case TLS_NO_LIBRARY_SUPPORT_ERROR:
808 error (_("Cannot find thread-local variables "
809 "in this thread library."));
810 break;
811 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
812 if (objfile_is_library)
813 error (_("Cannot find shared library `%s' in dynamic"
814 " linker's load module list"), objfile_name (objfile));
815 else
816 error (_("Cannot find executable file `%s' in dynamic"
817 " linker's load module list"), objfile_name (objfile));
818 break;
819 case TLS_NOT_ALLOCATED_YET_ERROR:
820 if (objfile_is_library)
821 error (_("The inferior has not yet allocated storage for"
822 " thread-local variables in\n"
823 "the shared library `%s'\n"
824 "for %s"),
825 objfile_name (objfile), target_pid_to_str (ptid));
826 else
827 error (_("The inferior has not yet allocated storage for"
828 " thread-local variables in\n"
829 "the executable `%s'\n"
830 "for %s"),
831 objfile_name (objfile), target_pid_to_str (ptid));
832 break;
833 case TLS_GENERIC_ERROR:
834 if (objfile_is_library)
835 error (_("Cannot find thread-local storage for %s, "
836 "shared library %s:\n%s"),
837 target_pid_to_str (ptid),
838 objfile_name (objfile), ex.message);
839 else
840 error (_("Cannot find thread-local storage for %s, "
841 "executable file %s:\n%s"),
842 target_pid_to_str (ptid),
843 objfile_name (objfile), ex.message);
844 break;
845 default:
846 throw_exception (ex);
847 break;
848 }
849 }
850 }
851 /* It wouldn't be wrong here to try a gdbarch method, too; finding
852 TLS is an ABI-specific thing. But we don't do that yet. */
853 else
854 error (_("Cannot find thread-local variables on this target"));
855
856 return addr;
857 }
858
859 const char *
860 target_xfer_status_to_string (enum target_xfer_status status)
861 {
862 #define CASE(X) case X: return #X
863 switch (status)
864 {
865 CASE(TARGET_XFER_E_IO);
866 CASE(TARGET_XFER_UNAVAILABLE);
867 default:
868 return "<unknown>";
869 }
870 #undef CASE
871 }
872
873
874 #undef MIN
875 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
876
877 /* target_read_string -- read a null terminated string, up to LEN bytes,
878 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
879 Set *STRING to a pointer to malloc'd memory containing the data; the caller
880 is responsible for freeing it. Return the number of bytes successfully
881 read. */
882
883 int
884 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
885 {
886 int tlen, offset, i;
887 gdb_byte buf[4];
888 int errcode = 0;
889 char *buffer;
890 int buffer_allocated;
891 char *bufptr;
892 unsigned int nbytes_read = 0;
893
894 gdb_assert (string);
895
896 /* Small for testing. */
897 buffer_allocated = 4;
898 buffer = xmalloc (buffer_allocated);
899 bufptr = buffer;
900
901 while (len > 0)
902 {
903 tlen = MIN (len, 4 - (memaddr & 3));
904 offset = memaddr & 3;
905
906 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
907 if (errcode != 0)
908 {
909 /* The transfer request might have crossed the boundary to an
910 unallocated region of memory. Retry the transfer, requesting
911 a single byte. */
912 tlen = 1;
913 offset = 0;
914 errcode = target_read_memory (memaddr, buf, 1);
915 if (errcode != 0)
916 goto done;
917 }
918
919 if (bufptr - buffer + tlen > buffer_allocated)
920 {
921 unsigned int bytes;
922
923 bytes = bufptr - buffer;
924 buffer_allocated *= 2;
925 buffer = xrealloc (buffer, buffer_allocated);
926 bufptr = buffer + bytes;
927 }
928
929 for (i = 0; i < tlen; i++)
930 {
931 *bufptr++ = buf[i + offset];
932 if (buf[i + offset] == '\000')
933 {
934 nbytes_read += i + 1;
935 goto done;
936 }
937 }
938
939 memaddr += tlen;
940 len -= tlen;
941 nbytes_read += tlen;
942 }
943 done:
944 *string = buffer;
945 if (errnop != NULL)
946 *errnop = errcode;
947 return nbytes_read;
948 }
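/* Example (illustrative) caller:

     char *str;
     int err;
     int nread = target_read_string (addr, &str, 200, &err);

     if (err == 0)
       ... use the NREAD bytes at STR ...
     xfree (str);

   Note *STRING is set even on error, so it must always be freed.  */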
949
950 struct target_section_table *
951 target_get_section_table (struct target_ops *target)
952 {
953 return (*target->to_get_section_table) (target);
954 }
955
956 /* Find a section containing ADDR. */
957
958 struct target_section *
959 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
960 {
961 struct target_section_table *table = target_get_section_table (target);
962 struct target_section *secp;
963
964 if (table == NULL)
965 return NULL;
966
967 for (secp = table->sections; secp < table->sections_end; secp++)
968 {
969 if (addr >= secp->addr && addr < secp->endaddr)
970 return secp;
971 }
972 return NULL;
973 }
974
975
976 /* Helper for the memory xfer routines. Checks the attributes of the
977 memory region of MEMADDR against the read or write being attempted.
978 If the access is permitted, returns true; otherwise returns false.
979 REGION_P is an optional output parameter. If non-NULL, it is
980 filled with a pointer to the memory region of MEMADDR. REG_LEN
981 returns LEN trimmed to the end of the region. This is how much the
982 caller can continue requesting, if the access is permitted. A
983 single xfer request must not straddle memory region boundaries. */
984
985 static int
986 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
987 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
988 struct mem_region **region_p)
989 {
990 struct mem_region *region;
991
992 region = lookup_mem_region (memaddr);
993
994 if (region_p != NULL)
995 *region_p = region;
996
997 switch (region->attrib.mode)
998 {
999 case MEM_RO:
1000 if (writebuf != NULL)
1001 return 0;
1002 break;
1003
1004 case MEM_WO:
1005 if (readbuf != NULL)
1006 return 0;
1007 break;
1008
1009 case MEM_FLASH:
1010 /* We only support writing to flash during "load" for now. */
1011 if (writebuf != NULL)
1012 error (_("Writing to flash memory forbidden in this context"));
1013 break;
1014
1015 case MEM_NONE:
1016 return 0;
1017 }
1018
1019 /* region->hi == 0 means there's no upper bound. */
1020 if (memaddr + len < region->hi || region->hi == 0)
1021 *reg_len = len;
1022 else
1023 *reg_len = region->hi - memaddr;
1024
1025 return 1;
1026 }
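/* For instance (illustrative numbers): with a region covering
   [0x1000, 0x2000) and a 2048-byte access starting at 0x1f80,
   *REG_LEN is trimmed to 0x80 so the transfer stops at the region
   boundary; the caller then re-issues the remainder against the next
   region.  */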
1027
1028 /* Read memory from more than one valid target. A core file, for
1029 instance, could have some of the memory but delegate other bits to
1030 the target below it. So, we must manually try all targets. */
1031
1032 static enum target_xfer_status
1033 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1034 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1035 ULONGEST *xfered_len)
1036 {
1037 enum target_xfer_status res;
1038
1039 do
1040 {
1041 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1042 readbuf, writebuf, memaddr, len,
1043 xfered_len);
1044 if (res == TARGET_XFER_OK)
1045 break;
1046
1047 /* Stop if the target reports that the memory is not available. */
1048 if (res == TARGET_XFER_UNAVAILABLE)
1049 break;
1050
1051 /* We want to continue past core files to executables, but not
1052 past a running target's memory. */
1053 if (ops->to_has_all_memory (ops))
1054 break;
1055
1056 ops = ops->beneath;
1057 }
1058 while (ops != NULL);
1059
1060 /* The cache works at the raw memory level. Make sure the cache
1061 gets updated with raw contents no matter what kind of memory
1062 object was originally being written. Note we do write-through
1063 first, so that if it fails, we don't write to the cache contents
1064 that never made it to the target. */
1065 if (writebuf != NULL
1066 && !ptid_equal (inferior_ptid, null_ptid)
1067 && target_dcache_init_p ()
1068 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1069 {
1070 DCACHE *dcache = target_dcache_get ();
1071
1072 /* Note that writing to an area of memory which wasn't present
1073 in the cache doesn't cause it to be loaded in. */
1074 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1075 }
1076
1077 return res;
1078 }
1079
1080 /* Perform a partial memory transfer.
1081 For docs see target.h, to_xfer_partial. */
1082
1083 static enum target_xfer_status
1084 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1085 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1086 ULONGEST len, ULONGEST *xfered_len)
1087 {
1088 enum target_xfer_status res;
1089 ULONGEST reg_len;
1090 struct mem_region *region;
1091 struct inferior *inf;
1092
1093 /* For accesses to unmapped overlay sections, read directly from
1094 files. Must do this first, as MEMADDR may need adjustment. */
1095 if (readbuf != NULL && overlay_debugging)
1096 {
1097 struct obj_section *section = find_pc_overlay (memaddr);
1098
1099 if (pc_in_unmapped_range (memaddr, section))
1100 {
1101 struct target_section_table *table
1102 = target_get_section_table (ops);
1103 const char *section_name = section->the_bfd_section->name;
1104
1105 memaddr = overlay_mapped_address (memaddr, section);
1106 return section_table_xfer_memory_partial (readbuf, writebuf,
1107 memaddr, len, xfered_len,
1108 table->sections,
1109 table->sections_end,
1110 section_name);
1111 }
1112 }
1113
1114 /* Try the executable files, if "trust-readonly-sections" is set. */
1115 if (readbuf != NULL && trust_readonly)
1116 {
1117 struct target_section *secp;
1118 struct target_section_table *table;
1119
1120 secp = target_section_by_addr (ops, memaddr);
1121 if (secp != NULL
1122 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1123 secp->the_bfd_section)
1124 & SEC_READONLY))
1125 {
1126 table = target_get_section_table (ops);
1127 return section_table_xfer_memory_partial (readbuf, writebuf,
1128 memaddr, len, xfered_len,
1129 table->sections,
1130 table->sections_end,
1131 NULL);
1132 }
1133 }
1134
1135 /* Try GDB's internal data cache. */
1136
1137 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1138 &region))
1139 return TARGET_XFER_E_IO;
1140
1141 if (!ptid_equal (inferior_ptid, null_ptid))
1142 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1143 else
1144 inf = NULL;
1145
1146 if (inf != NULL
1147 && readbuf != NULL
1148 /* The dcache reads whole cache lines; that doesn't play well
1149 with reading from a trace buffer, because reading outside of
1150 the collected memory range fails. */
1151 && get_traceframe_number () == -1
1152 && (region->attrib.cache
1153 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1154 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1155 {
1156 DCACHE *dcache = target_dcache_get_or_init ();
1157
1158 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1159 reg_len, xfered_len);
1160 }
1161
1162 /* If none of those methods found the memory we wanted, fall back
1163 to a target partial transfer. Normally a single call to
1164 to_xfer_partial is enough; if it doesn't recognize an object
1165 it will call the to_xfer_partial of the next target down.
1166 But for memory this won't do. Memory is the only target
1167 object which can be read from more than one valid target.
1168 A core file, for instance, could have some of the memory but
1169 delegate other bits to the target below it. So, we must
1170 manually try all targets. */
1171
1172 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1173 xfered_len);
1174
1175 /* If we still haven't got anything, return the last error. We
1176 give up. */
1177 return res;
1178 }
1179
1180 /* Perform a partial memory transfer. For docs see target.h,
1181 to_xfer_partial. */
1182
1183 static enum target_xfer_status
1184 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1185 gdb_byte *readbuf, const gdb_byte *writebuf,
1186 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1187 {
1188 enum target_xfer_status res;
1189
1190 /* Zero length requests are ok and require no work. */
1191 if (len == 0)
1192 return TARGET_XFER_EOF;
1193
1194 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1195 breakpoint insns, thus hiding out from higher layers whether
1196 there are software breakpoints inserted in the code stream. */
1197 if (readbuf != NULL)
1198 {
1199 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1200 xfered_len);
1201
1202 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1203 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1204 }
1205 else
1206 {
1207 void *buf;
1208 struct cleanup *old_chain;
1209
1210 /* A large write request is likely to be partially satisfied
1211 by memory_xfer_partial_1. We will continually malloc
1212 and free a copy of the entire write request for breakpoint
1213 shadow handling even though we only end up writing a small
1214 subset of it. Cap writes to 4KB to mitigate this. */
1215 len = min (4096, len);
1216
1217 buf = xmalloc (len);
1218 old_chain = make_cleanup (xfree, buf);
1219 memcpy (buf, writebuf, len);
1220
1221 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1222 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1223 xfered_len);
1224
1225 do_cleanups (old_chain);
1226 }
1227
1228 return res;
1229 }
1230
1231 static void
1232 restore_show_memory_breakpoints (void *arg)
1233 {
1234 show_memory_breakpoints = (uintptr_t) arg;
1235 }
1236
1237 struct cleanup *
1238 make_show_memory_breakpoints_cleanup (int show)
1239 {
1240 int current = show_memory_breakpoints;
1241
1242 show_memory_breakpoints = show;
1243 return make_cleanup (restore_show_memory_breakpoints,
1244 (void *) (uintptr_t) current);
1245 }
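/* Typical usage (illustrative): temporarily read memory as it
   physically is on the target, with breakpoint instructions visible:

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     target_read_memory (memaddr, buf, len);
     do_cleanups (old_chain);
*/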
1246
1247 /* For docs see target.h, to_xfer_partial. */
1248
1249 enum target_xfer_status
1250 target_xfer_partial (struct target_ops *ops,
1251 enum target_object object, const char *annex,
1252 gdb_byte *readbuf, const gdb_byte *writebuf,
1253 ULONGEST offset, ULONGEST len,
1254 ULONGEST *xfered_len)
1255 {
1256 enum target_xfer_status retval;
1257
1258 gdb_assert (ops->to_xfer_partial != NULL);
1259
1260 /* Transfer is done when LEN is zero. */
1261 if (len == 0)
1262 return TARGET_XFER_EOF;
1263
1264 if (writebuf && !may_write_memory)
1265 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1266 core_addr_to_string_nz (offset), plongest (len));
1267
1268 *xfered_len = 0;
1269
1270 /* If this is a memory transfer, let the memory-specific code
1271 have a look at it instead. Memory transfers are more
1272 complicated. */
1273 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1274 || object == TARGET_OBJECT_CODE_MEMORY)
1275 retval = memory_xfer_partial (ops, object, readbuf,
1276 writebuf, offset, len, xfered_len);
1277 else if (object == TARGET_OBJECT_RAW_MEMORY)
1278 {
1279 /* Skip/avoid accessing the target if the memory region
1280 attributes block the access. Check this here instead of in
1281 raw_memory_xfer_partial as otherwise we'd end up checking
1282 this twice in the case of the memory_xfer_partial path is
1283 taken; once before checking the dcache, and another in the
1284 tail call to raw_memory_xfer_partial. */
1285 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1286 NULL))
1287 return TARGET_XFER_E_IO;
1288
1289 /* Request the normal memory object from other layers. */
1290 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1291 xfered_len);
1292 }
1293 else
1294 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1295 writebuf, offset, len, xfered_len);
1296
1297 if (targetdebug)
1298 {
1299 const unsigned char *myaddr = NULL;
1300
1301 fprintf_unfiltered (gdb_stdlog,
1302 "%s:target_xfer_partial "
1303 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1304 ops->to_shortname,
1305 (int) object,
1306 (annex ? annex : "(null)"),
1307 host_address_to_string (readbuf),
1308 host_address_to_string (writebuf),
1309 core_addr_to_string_nz (offset),
1310 pulongest (len), retval,
1311 pulongest (*xfered_len));
1312
1313 if (readbuf)
1314 myaddr = readbuf;
1315 if (writebuf)
1316 myaddr = writebuf;
1317 if (retval == TARGET_XFER_OK && myaddr != NULL)
1318 {
1319 int i;
1320
1321 fputs_unfiltered (", bytes =", gdb_stdlog);
1322 for (i = 0; i < *xfered_len; i++)
1323 {
1324 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1325 {
1326 if (targetdebug < 2 && i > 0)
1327 {
1328 fprintf_unfiltered (gdb_stdlog, " ...");
1329 break;
1330 }
1331 fprintf_unfiltered (gdb_stdlog, "\n");
1332 }
1333
1334 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1335 }
1336 }
1337
1338 fputc_unfiltered ('\n', gdb_stdlog);
1339 }
1340
1341 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1342 properly. Do assertion after printing debug messages, so that we
1343 can find more clues on assertion failure from debugging messages. */
1344 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1345 gdb_assert (*xfered_len > 0);
1346
1347 return retval;
1348 }
1349
1350 /* Read LEN bytes of target memory at address MEMADDR, placing the
1351 results in GDB's memory at MYADDR. Returns either 0 for success or
1352 TARGET_XFER_E_IO if any error occurs.
1353
1354 If an error occurs, no guarantee is made about the contents of the data at
1355 MYADDR. In particular, the caller should not depend upon partial reads
1356 filling the buffer with good data. There is no way for the caller to know
1357 how much good data might have been transfered anyway. Callers that can
1358 deal with partial reads should call target_read (which will retry until
1359 it makes no progress, and then return how much was transferred). */
1360
1361 int
1362 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1363 {
1364 /* Dispatch to the topmost target, not the flattened current_target.
1365 Memory accesses check target->to_has_(all_)memory, and the
1366 flattened target doesn't inherit those. */
1367 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1368 myaddr, memaddr, len) == len)
1369 return 0;
1370 else
1371 return TARGET_XFER_E_IO;
1372 }
1373
1374 /* See target/target.h. */
1375
1376 int
1377 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1378 {
1379 gdb_byte buf[4];
1380 int r;
1381
1382 r = target_read_memory (memaddr, buf, sizeof buf);
1383 if (r != 0)
1384 return r;
1385 *result = extract_unsigned_integer (buf, sizeof buf,
1386 gdbarch_byte_order (target_gdbarch ()));
1387 return 0;
1388 }
1389
1390 /* Like target_read_memory, but specify explicitly that this is a read
1391 from the target's raw memory. That is, this read bypasses the
1392 dcache, breakpoint shadowing, etc. */
1393
1394 int
1395 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1396 {
1397 /* See comment in target_read_memory about why the request starts at
1398 current_target.beneath. */
1399 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1400 myaddr, memaddr, len) == len)
1401 return 0;
1402 else
1403 return TARGET_XFER_E_IO;
1404 }
1405
1406 /* Like target_read_memory, but specify explicitly that this is a read from
1407 the target's stack. This may trigger different cache behavior. */
1408
1409 int
1410 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1411 {
1412 /* See comment in target_read_memory about why the request starts at
1413 current_target.beneath. */
1414 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1415 myaddr, memaddr, len) == len)
1416 return 0;
1417 else
1418 return TARGET_XFER_E_IO;
1419 }
1420
1421 /* Like target_read_memory, but specify explicitly that this is a read from
1422 the target's code. This may trigger different cache behavior. */
1423
1424 int
1425 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1426 {
1427 /* See comment in target_read_memory about why the request starts at
1428 current_target.beneath. */
1429 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1430 myaddr, memaddr, len) == len)
1431 return 0;
1432 else
1433 return TARGET_XFER_E_IO;
1434 }
1435
1436 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1437 Returns either 0 for success or TARGET_XFER_E_IO if any
1438 error occurs. If an error occurs, no guarantee is made about how
1439 much data got written. Callers that can deal with partial writes
1440 should call target_write. */
1441
1442 int
1443 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1444 {
1445 /* See comment in target_read_memory about why the request starts at
1446 current_target.beneath. */
1447 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1448 myaddr, memaddr, len) == len)
1449 return 0;
1450 else
1451 return TARGET_XFER_E_IO;
1452 }
1453
1454 /* Write LEN bytes from MYADDR to target raw memory at address
1455 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1456 if any error occurs. If an error occurs, no guarantee is made
1457 about how much data got written. Callers that can deal with
1458 partial writes should call target_write. */
1459
1460 int
1461 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1462 {
1463 /* See comment in target_read_memory about why the request starts at
1464 current_target.beneath. */
1465 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1466 myaddr, memaddr, len) == len)
1467 return 0;
1468 else
1469 return TARGET_XFER_E_IO;
1470 }
1471
1472 /* Fetch the target's memory map. */
1473
1474 VEC(mem_region_s) *
1475 target_memory_map (void)
1476 {
1477 VEC(mem_region_s) *result;
1478 struct mem_region *last_one, *this_one;
1479 int ix;
1480 struct target_ops *t;
1481
1482 result = current_target.to_memory_map (&current_target);
1483 if (result == NULL)
1484 return NULL;
1485
1486 qsort (VEC_address (mem_region_s, result),
1487 VEC_length (mem_region_s, result),
1488 sizeof (struct mem_region), mem_region_cmp);
1489
1490 /* Check that regions do not overlap. Simultaneously assign
1491 a numbering for the "mem" commands to use to refer to
1492 each region. */
1493 last_one = NULL;
1494 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1495 {
1496 this_one->number = ix;
1497
1498 if (last_one && last_one->hi > this_one->lo)
1499 {
1500 warning (_("Overlapping regions in memory map: ignoring"));
1501 VEC_free (mem_region_s, result);
1502 return NULL;
1503 }
1504 last_one = this_one;
1505 }
1506
1507 return result;
1508 }
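/* Example (illustrative): callers own the returned vector:

     VEC(mem_region_s) *map = target_memory_map ();

     if (map != NULL)
       {
         ... inspect the regions ...
         VEC_free (mem_region_s, map);
       }
*/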
1509
1510 void
1511 target_flash_erase (ULONGEST address, LONGEST length)
1512 {
1513 current_target.to_flash_erase (&current_target, address, length);
1514 }
1515
1516 void
1517 target_flash_done (void)
1518 {
1519 current_target.to_flash_done (&current_target);
1520 }
1521
1522 static void
1523 show_trust_readonly (struct ui_file *file, int from_tty,
1524 struct cmd_list_element *c, const char *value)
1525 {
1526 fprintf_filtered (file,
1527 _("Mode for reading from readonly sections is %s.\n"),
1528 value);
1529 }
1530
1531 /* Target vector read/write partial wrapper functions. */
1532
1533 static enum target_xfer_status
1534 target_read_partial (struct target_ops *ops,
1535 enum target_object object,
1536 const char *annex, gdb_byte *buf,
1537 ULONGEST offset, ULONGEST len,
1538 ULONGEST *xfered_len)
1539 {
1540 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1541 xfered_len);
1542 }
1543
1544 static enum target_xfer_status
1545 target_write_partial (struct target_ops *ops,
1546 enum target_object object,
1547 const char *annex, const gdb_byte *buf,
1548 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1549 {
1550 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1551 xfered_len);
1552 }
1553
1554 /* Wrappers to perform the full transfer. */
1555
1556 /* For docs on target_read see target.h. */
1557
1558 LONGEST
1559 target_read (struct target_ops *ops,
1560 enum target_object object,
1561 const char *annex, gdb_byte *buf,
1562 ULONGEST offset, LONGEST len)
1563 {
1564 LONGEST xfered = 0;
1565
1566 while (xfered < len)
1567 {
1568 ULONGEST xfered_len;
1569 enum target_xfer_status status;
1570
1571 status = target_read_partial (ops, object, annex,
1572 (gdb_byte *) buf + xfered,
1573 offset + xfered, len - xfered,
1574 &xfered_len);
1575
1576 /* Call an observer, notifying them of the xfer progress? */
1577 if (status == TARGET_XFER_EOF)
1578 return xfered;
1579 else if (status == TARGET_XFER_OK)
1580 {
1581 xfered += xfered_len;
1582 QUIT;
1583 }
1584 else
1585 return -1;
1586
1587 }
1588 return len;
1589 }
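/* Example (illustrative): because target_read retries partial
   transfers, a short return value means the object really ended
   (or -1 on error), so full reads can be checked directly:

     gdb_byte buf[64];

     if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                      buf, memaddr, sizeof buf) == sizeof buf)
       ... all 64 bytes were read ...
*/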
1590
1591 /* Assuming that the entire [begin, end) range of memory cannot be
1592 read, try to read whatever subrange is possible to read.
1593
1594 The function returns, in RESULT, either zero or one memory block.
1595 If there's a readable subrange at the beginning, it is completely
1596 read and returned. Any further readable subrange will not be read.
1597 Otherwise, if there's a readable subrange at the end, it will be
1598 completely read and returned. Any readable subranges before it
1599 (obviously, not starting at the beginning) will be ignored. In
1600 other cases -- either no readable subrange, or readable subrange(s)
1601 that are neither at the beginning nor the end, nothing is returned.
1602
1603 The purpose of this function is to handle a read across a boundary
1604 of accessible memory when the memory map is not available.
1605 The above restrictions are fine for this case, but will give
1606 incorrect results if the memory is 'patchy'. However, supporting
1607 'patchy' memory would require trying to read every single byte,
1608 which seems an unacceptable solution. An explicit memory map is
1609 recommended for this case -- and target_read_memory_robust will
1610 take care of reading multiple ranges then. */
1611
1612 static void
1613 read_whatever_is_readable (struct target_ops *ops,
1614 ULONGEST begin, ULONGEST end,
1615 VEC(memory_read_result_s) **result)
1616 {
1617 gdb_byte *buf = xmalloc (end - begin);
1618 ULONGEST current_begin = begin;
1619 ULONGEST current_end = end;
1620 int forward;
1621 memory_read_result_s r;
1622 ULONGEST xfered_len;
1623
1624 /* If we previously failed to read 1 byte, nothing can be done here. */
1625 if (end - begin <= 1)
1626 {
1627 xfree (buf);
1628 return;
1629 }
1630
1631 /* Check that either first or the last byte is readable, and give up
1632 if not. This heuristic is meant to permit reading accessible memory
1633 at the boundary of an accessible region. */
1634 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1635 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
1636 {
1637 forward = 1;
1638 ++current_begin;
1639 }
1640 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1641 buf + (end-begin) - 1, end - 1, 1,
1642 &xfered_len) == TARGET_XFER_OK)
1643 {
1644 forward = 0;
1645 --current_end;
1646 }
1647 else
1648 {
1649 xfree (buf);
1650 return;
1651 }
1652
1653 /* Loop invariant: the range [current_begin, current_end) was previously
1654 found not to be readable as a whole.
1655
1656 Note loop condition -- if the range has 1 byte, we can't divide the range
1657 so there's no point trying further. */
1658 while (current_end - current_begin > 1)
1659 {
1660 ULONGEST first_half_begin, first_half_end;
1661 ULONGEST second_half_begin, second_half_end;
1662 LONGEST xfer;
1663 ULONGEST middle = current_begin + (current_end - current_begin)/2;
1664
1665 if (forward)
1666 {
1667 first_half_begin = current_begin;
1668 first_half_end = middle;
1669 second_half_begin = middle;
1670 second_half_end = current_end;
1671 }
1672 else
1673 {
1674 first_half_begin = middle;
1675 first_half_end = current_end;
1676 second_half_begin = current_begin;
1677 second_half_end = middle;
1678 }
1679
1680 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1681 buf + (first_half_begin - begin),
1682 first_half_begin,
1683 first_half_end - first_half_begin);
1684
1685 if (xfer == first_half_end - first_half_begin)
1686 {
1687 /* This half reads up fine. So, the error must be in the
1688 other half. */
1689 current_begin = second_half_begin;
1690 current_end = second_half_end;
1691 }
1692 else
1693 {
1694 /* This half is not readable. Because we've tried one byte, we
1695 know some part of this half is actually readable. Go to the next
1696 iteration to divide again and try to read.
1697
1698 We don't handle the other half, because this function only tries
1699 to read a single readable subrange. */
1700 current_begin = first_half_begin;
1701 current_end = first_half_end;
1702 }
1703 }
1704
1705 if (forward)
1706 {
1707 /* The [begin, current_begin) range has been read. */
1708 r.begin = begin;
1709 r.end = current_begin;
1710 r.data = buf;
1711 }
1712 else
1713 {
1714 /* The [current_end, end) range has been read. */
1715 LONGEST rlen = end - current_end;
1716
1717 r.data = xmalloc (rlen);
1718 memcpy (r.data, buf + current_end - begin, rlen);
1719 r.begin = current_end;
1720 r.end = end;
1721 xfree (buf);
1722 }
1723 VEC_safe_push (memory_read_result_s, (*result), &r);
1724 }
1725
1726 void
1727 free_memory_read_result_vector (void *x)
1728 {
1729 VEC(memory_read_result_s) *v = x;
1730 memory_read_result_s *current;
1731 int ix;
1732
1733 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1734 {
1735 xfree (current->data);
1736 }
1737 VEC_free (memory_read_result_s, v);
1738 }
1739
1740 VEC(memory_read_result_s) *
1741 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
1742 {
1743 VEC(memory_read_result_s) *result = 0;
1744
1745 LONGEST xfered = 0;
1746 while (xfered < len)
1747 {
1748 struct mem_region *region = lookup_mem_region (offset + xfered);
1749 LONGEST rlen;
1750
1751 /* If there is no explicit region, a fake one should be created. */
1752 gdb_assert (region);
1753
1754 if (region->hi == 0)
1755 rlen = len - xfered;
1756 else
1757 rlen = region->hi - offset;
1758
1759 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1760 {
1761 /* Cannot read this region. Note that we can end up here only
1762 if the region is explicitly marked inaccessible, or
1763 'inaccessible-by-default' is in effect. */
1764 xfered += rlen;
1765 }
1766 else
1767 {
1768 LONGEST to_read = min (len - xfered, rlen);
1769 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
1770
1771 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1772 (gdb_byte *) buffer,
1773 offset + xfered, to_read);
1774 /* Call an observer, notifying them of the xfer progress? */
1775 if (xfer <= 0)
1776 {
1777 /* Got an error reading the full chunk. See if maybe we can read
1778 some subrange. */
1779 xfree (buffer);
1780 read_whatever_is_readable (ops, offset + xfered,
1781 offset + xfered + to_read, &result);
1782 xfered += to_read;
1783 }
1784 else
1785 {
1786 struct memory_read_result r;
1787 r.data = buffer;
1788 r.begin = offset + xfered;
1789 r.end = r.begin + xfer;
1790 VEC_safe_push (memory_read_result_s, result, &r);
1791 xfered += xfer;
1792 }
1793 QUIT;
1794 }
1795 }
1796 return result;
1797 }
1798
1799
1800 /* An alternative to target_write with progress callbacks. */
1801
1802 LONGEST
1803 target_write_with_progress (struct target_ops *ops,
1804 enum target_object object,
1805 const char *annex, const gdb_byte *buf,
1806 ULONGEST offset, LONGEST len,
1807 void (*progress) (ULONGEST, void *), void *baton)
1808 {
1809 LONGEST xfered = 0;
1810
1811 /* Give the progress callback a chance to set up. */
1812 if (progress)
1813 (*progress) (0, baton);
1814
1815 while (xfered < len)
1816 {
1817 ULONGEST xfered_len;
1818 enum target_xfer_status status;
1819
1820 status = target_write_partial (ops, object, annex,
1821 (gdb_byte *) buf + xfered,
1822 offset + xfered, len - xfered,
1823 &xfered_len);
1824
1825 if (status != TARGET_XFER_OK)
1826 return status == TARGET_XFER_EOF ? xfered : -1;
1827
1828 if (progress)
1829 (*progress) (xfered_len, baton);
1830
1831 xfered += xfered_len;
1832 QUIT;
1833 }
1834 return len;
1835 }
1836
1837 /* For docs on target_write see target.h. */
1838
1839 LONGEST
1840 target_write (struct target_ops *ops,
1841 enum target_object object,
1842 const char *annex, const gdb_byte *buf,
1843 ULONGEST offset, LONGEST len)
1844 {
1845 return target_write_with_progress (ops, object, annex, buf, offset, len,
1846 NULL, NULL);
1847 }
1848
1849 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1850 the size of the transferred data. PADDING additional bytes are
1851 available in *BUF_P. This is a helper function for
1852 target_read_alloc; see the declaration of that function for more
1853 information. */
1854
1855 static LONGEST
1856 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1857 const char *annex, gdb_byte **buf_p, int padding)
1858 {
1859 size_t buf_alloc, buf_pos;
1860 gdb_byte *buf;
1861
1862 /* This function does not have a length parameter; it reads the
1863 entire OBJECT. Also, it doesn't support objects fetched partly
1864 from one target and partly from another (in a different stratum,
1865 e.g. a core file and an executable). Both reasons make it
1866 unsuitable for reading memory. */
1867 gdb_assert (object != TARGET_OBJECT_MEMORY);
1868
1869 /* Start by reading up to 4K at a time. The target will throttle
1870 this number down if necessary. */
1871 buf_alloc = 4096;
1872 buf = xmalloc (buf_alloc);
1873 buf_pos = 0;
1874 while (1)
1875 {
1876 ULONGEST xfered_len;
1877 enum target_xfer_status status;
1878
1879 status = target_read_partial (ops, object, annex, &buf[buf_pos],
1880 buf_pos, buf_alloc - buf_pos - padding,
1881 &xfered_len);
1882
1883 if (status == TARGET_XFER_EOF)
1884 {
1885 /* Read all there was. */
1886 if (buf_pos == 0)
1887 xfree (buf);
1888 else
1889 *buf_p = buf;
1890 return buf_pos;
1891 }
1892 else if (status != TARGET_XFER_OK)
1893 {
1894 /* An error occurred. */
1895 xfree (buf);
1896 return TARGET_XFER_E_IO;
1897 }
1898
1899 buf_pos += xfered_len;
1900
1901 /* If the buffer is filling up, expand it. */
1902 if (buf_alloc < buf_pos * 2)
1903 {
1904 buf_alloc *= 2;
1905 buf = xrealloc (buf, buf_alloc);
1906 }
1907
1908 QUIT;
1909 }
1910 }
1911
1912 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1913 the size of the transferred data. See the declaration of this
1914 function in "target.h" for more information about the return value. */
1915
1916 LONGEST
1917 target_read_alloc (struct target_ops *ops, enum target_object object,
1918 const char *annex, gdb_byte **buf_p)
1919 {
1920 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1921 }
1922
1923 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1924 returned as a string, allocated using xmalloc. If an error occurs
1925 or the transfer is unsupported, NULL is returned. Empty objects
1926 are returned as allocated but empty strings. A warning is issued
1927 if the result contains any embedded NUL bytes. */
1928
1929 char *
1930 target_read_stralloc (struct target_ops *ops, enum target_object object,
1931 const char *annex)
1932 {
1933 gdb_byte *buffer;
1934 char *bufstr;
1935 LONGEST i, transferred;
1936
1937 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1938 bufstr = (char *) buffer;
1939
1940 if (transferred < 0)
1941 return NULL;
1942
1943 if (transferred == 0)
1944 return xstrdup ("");
1945
1946 bufstr[transferred] = 0;
1947
1948 /* Check for embedded NUL bytes; but allow trailing NULs. */
1949 for (i = strlen (bufstr); i < transferred; i++)
1950 if (bufstr[i] != 0)
1951 {
1952 warning (_("target object %d, annex %s, "
1953 "contained unexpected null characters"),
1954 (int) object, annex ? annex : "(none)");
1955 break;
1956 }
1957
1958 return bufstr;
1959 }
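
/* Example (illustrative sketch): reading an OS data table as a string.
   This is essentially what target_get_osdata below does; "processes" is
   assumed here as an annex the native target understands.

     char *osdata = target_read_stralloc (&current_target,
                                          TARGET_OBJECT_OSDATA,
                                          "processes");
     if (osdata != NULL)
       {
         puts_filtered (osdata);
         xfree (osdata);
       }
*/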
1960
1961 /* Memory transfer methods. */
1962
1963 void
1964 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1965 LONGEST len)
1966 {
1967 /* This method is used to read from an alternate, non-current
1968 target. This read must bypass the overlay support (as symbols
1969 don't match this target), and GDB's internal cache (wrong cache
1970 for this target). */
1971 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1972 != len)
1973 memory_error (TARGET_XFER_E_IO, addr);
1974 }
1975
1976 ULONGEST
1977 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1978 int len, enum bfd_endian byte_order)
1979 {
1980 gdb_byte buf[sizeof (ULONGEST)];
1981
1982 gdb_assert (len <= sizeof (buf));
1983 get_target_memory (ops, addr, buf, len);
1984 return extract_unsigned_integer (buf, len, byte_order);
1985 }
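
/* Example (illustrative sketch): fetching a 4-byte big-endian value from
   an alternate target OPS, bypassing overlays and GDB's caches. ADDR is
   an assumed address variable.

     ULONGEST val = get_target_memory_unsigned (ops, addr, 4,
                                                BFD_ENDIAN_BIG);
*/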
1986
1987 /* See target.h. */
1988
1989 int
1990 target_insert_breakpoint (struct gdbarch *gdbarch,
1991 struct bp_target_info *bp_tgt)
1992 {
1993 if (!may_insert_breakpoints)
1994 {
1995 warning (_("May not insert breakpoints"));
1996 return 1;
1997 }
1998
1999 return current_target.to_insert_breakpoint (&current_target,
2000 gdbarch, bp_tgt);
2001 }
2002
2003 /* See target.h. */
2004
2005 int
2006 target_remove_breakpoint (struct gdbarch *gdbarch,
2007 struct bp_target_info *bp_tgt)
2008 {
2009 /* This is kind of a weird case to handle, but the permission might
2010 have been changed after breakpoints were inserted - in which case
2011 we should just take the user literally and assume that any
2012 breakpoints should be left in place. */
2013 if (!may_insert_breakpoints)
2014 {
2015 warning (_("May not remove breakpoints"));
2016 return 1;
2017 }
2018
2019 return current_target.to_remove_breakpoint (&current_target,
2020 gdbarch, bp_tgt);
2021 }
2022
2023 static void
2024 target_info (char *args, int from_tty)
2025 {
2026 struct target_ops *t;
2027 int has_all_mem = 0;
2028
2029 if (symfile_objfile != NULL)
2030 printf_unfiltered (_("Symbols from \"%s\".\n"),
2031 objfile_name (symfile_objfile));
2032
2033 for (t = target_stack; t != NULL; t = t->beneath)
2034 {
2035 if (!(*t->to_has_memory) (t))
2036 continue;
2037
2038 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2039 continue;
2040 if (has_all_mem)
2041 printf_unfiltered (_("\tWhile running this, "
2042 "GDB does not access memory from...\n"));
2043 printf_unfiltered ("%s:\n", t->to_longname);
2044 (t->to_files_info) (t);
2045 has_all_mem = (*t->to_has_all_memory) (t);
2046 }
2047 }
2048
2049 /* This function is called before any new inferior is created, e.g.
2050 by running a program, attaching, or connecting to a target.
2051 It cleans up any state from previous invocations which might
2052 change between runs. This is a subset of what target_preopen
2053 resets (things which might change between targets). */
2054
2055 void
2056 target_pre_inferior (int from_tty)
2057 {
2058 /* Clear out solib state. Otherwise the solib state of the previous
2059 inferior might have survived and is entirely wrong for the new
2060 target. This has been observed on GNU/Linux using glibc 2.3. How
2061 to reproduce:
2062
2063 bash$ ./foo&
2064 [1] 4711
2065 bash$ ./foo&
2066 [2] 4712
2067 bash$ gdb ./foo
2068 [...]
2069 (gdb) attach 4711
2070 (gdb) detach
2071 (gdb) attach 4712
2072 Cannot access memory at address 0xdeadbeef
2073 */
2074
2075 /* In some OSs, the shared library list is the same/global/shared
2076 across inferiors. If code is shared between processes, so are
2077 memory regions and features. */
2078 if (!gdbarch_has_global_solist (target_gdbarch ()))
2079 {
2080 no_shared_libraries (NULL, from_tty);
2081
2082 invalidate_target_mem_regions ();
2083
2084 target_clear_description ();
2085 }
2086
2087 agent_capability_invalidate ();
2088 }
2089
2090 /* Callback for iterate_over_inferiors. Gets rid of the given
2091 inferior. */
2092
2093 static int
2094 dispose_inferior (struct inferior *inf, void *args)
2095 {
2096 struct thread_info *thread;
2097
2098 thread = any_thread_of_process (inf->pid);
2099 if (thread)
2100 {
2101 switch_to_thread (thread->ptid);
2102
2103 /* Core inferiors actually should be detached, not killed. */
2104 if (target_has_execution)
2105 target_kill ();
2106 else
2107 target_detach (NULL, 0);
2108 }
2109
2110 return 0;
2111 }
2112
2113 /* This is to be called by the open routine before it does
2114 anything. */
2115
2116 void
2117 target_preopen (int from_tty)
2118 {
2119 dont_repeat ();
2120
2121 if (have_inferiors ())
2122 {
2123 if (!from_tty
2124 || !have_live_inferiors ()
2125 || query (_("A program is being debugged already. Kill it? ")))
2126 iterate_over_inferiors (dispose_inferior, NULL);
2127 else
2128 error (_("Program not killed."));
2129 }
2130
2131 /* Calling target_kill may remove the target from the stack. But if
2132 it doesn't (which seems like a win for UDI), remove it now. */
2133 /* Leave the exec target, though. The user may be switching from a
2134 live process to a core of the same program. */
2135 pop_all_targets_above (file_stratum);
2136
2137 target_pre_inferior (from_tty);
2138 }
2139
2140 /* Detach a target after doing deferred register stores. */
2141
2142 void
2143 target_detach (const char *args, int from_tty)
2144 {
2147 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2148 /* Don't remove global breakpoints here. They're removed on
2149 disconnection from the target. */
2150 ;
2151 else
2152 /* If we're in breakpoints-always-inserted mode, have to remove
2153 them before detaching. */
2154 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2155
2156 prepare_for_detach ();
2157
2158 current_target.to_detach (&current_target, args, from_tty);
2159 }
2160
2161 void
2162 target_disconnect (const char *args, int from_tty)
2163 {
2164 /* If we're in breakpoints-always-inserted mode or if breakpoints
2165 are global across processes, we have to remove them before
2166 disconnecting. */
2167 remove_breakpoints ();
2168
2169 current_target.to_disconnect (&current_target, args, from_tty);
2170 }
2171
2172 ptid_t
2173 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2174 {
2175 return (current_target.to_wait) (&current_target, ptid, status, options);
2176 }
2177
2178 char *
2179 target_pid_to_str (ptid_t ptid)
2180 {
2181 return (*current_target.to_pid_to_str) (&current_target, ptid);
2182 }
2183
2184 char *
2185 target_thread_name (struct thread_info *info)
2186 {
2187 return current_target.to_thread_name (&current_target, info);
2188 }
2189
2190 void
2191 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2192 {
2195 target_dcache_invalidate ();
2196
2197 current_target.to_resume (&current_target, ptid, step, signal);
2198
2199 registers_changed_ptid (ptid);
2200 /* We only set the internal executing state here. The user/frontend
2201 running state is set at a higher level. */
2202 set_executing (ptid, 1);
2203 clear_inline_frame_state (ptid);
2204 }
2205
2206 void
2207 target_pass_signals (int numsigs, unsigned char *pass_signals)
2208 {
2209 (*current_target.to_pass_signals) (&current_target, numsigs, pass_signals);
2210 }
2211
2212 void
2213 target_program_signals (int numsigs, unsigned char *program_signals)
2214 {
2215 (*current_target.to_program_signals) (&current_target,
2216 numsigs, program_signals);
2217 }
2218
2219 static int
2220 default_follow_fork (struct target_ops *self, int follow_child,
2221 int detach_fork)
2222 {
2223 /* Some target returned a fork event, but did not know how to follow it. */
2224 internal_error (__FILE__, __LINE__,
2225 _("could not find a target to follow fork"));
2226 }
2227
2228 /* Look through the list of possible targets for a target that can
2229 follow forks. */
2230
2231 int
2232 target_follow_fork (int follow_child, int detach_fork)
2233 {
2234 return current_target.to_follow_fork (&current_target,
2235 follow_child, detach_fork);
2236 }
2237
2238 static void
2239 default_mourn_inferior (struct target_ops *self)
2240 {
2241 internal_error (__FILE__, __LINE__,
2242 _("could not find a target to follow mourn inferior"));
2243 }
2244
2245 void
2246 target_mourn_inferior (void)
2247 {
2248 current_target.to_mourn_inferior (&current_target);
2249
2250 /* We no longer need to keep handles on any of the object files.
2251 Make sure to release them to avoid unnecessarily locking any
2252 of them while we're not actually debugging. */
2253 bfd_cache_close_all ();
2254 }
2255
2256 /* Look for a target which can describe architectural features, starting
2257 from TARGET. If we find one, return its description. */
2258
2259 const struct target_desc *
2260 target_read_description (struct target_ops *target)
2261 {
2262 return target->to_read_description (target);
2263 }
2264
2265 /* This implements a basic search of memory, reading target memory and
2266 performing the search here (as opposed to performing the search on the
2267 target side with, for example, gdbserver). */
2268
2269 int
2270 simple_search_memory (struct target_ops *ops,
2271 CORE_ADDR start_addr, ULONGEST search_space_len,
2272 const gdb_byte *pattern, ULONGEST pattern_len,
2273 CORE_ADDR *found_addrp)
2274 {
2275 /* NOTE: also defined in the find.c testcase. */
2276 #define SEARCH_CHUNK_SIZE 16000
2277 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2278 /* Buffer to hold memory contents for searching. */
2279 gdb_byte *search_buf;
2280 unsigned search_buf_size;
2281 struct cleanup *old_cleanups;
2282
2283 search_buf_size = chunk_size + pattern_len - 1;
2284
2285 /* No point in trying to allocate a buffer larger than the search space. */
2286 if (search_space_len < search_buf_size)
2287 search_buf_size = search_space_len;
2288
2289 search_buf = malloc (search_buf_size);
2290 if (search_buf == NULL)
2291 error (_("Unable to allocate memory to perform the search."));
2292 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2293
2294 /* Prime the search buffer. */
2295
2296 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2297 search_buf, start_addr, search_buf_size) != search_buf_size)
2298 {
2299 warning (_("Unable to access %s bytes of target "
2300 "memory at %s, halting search."),
2301 pulongest (search_buf_size), hex_string (start_addr));
2302 do_cleanups (old_cleanups);
2303 return -1;
2304 }
2305
2306 /* Perform the search.
2307
2308 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2309 When we've scanned N bytes we copy the trailing bytes to the start and
2310 read in another N bytes. */
2311
2312 while (search_space_len >= pattern_len)
2313 {
2314 gdb_byte *found_ptr;
2315 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2316
2317 found_ptr = memmem (search_buf, nr_search_bytes,
2318 pattern, pattern_len);
2319
2320 if (found_ptr != NULL)
2321 {
2322 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2323
2324 *found_addrp = found_addr;
2325 do_cleanups (old_cleanups);
2326 return 1;
2327 }
2328
2329 /* Not found in this chunk, skip to next chunk. */
2330
2331 /* Don't let search_space_len wrap here, it's unsigned. */
2332 if (search_space_len >= chunk_size)
2333 search_space_len -= chunk_size;
2334 else
2335 search_space_len = 0;
2336
2337 if (search_space_len >= pattern_len)
2338 {
2339 unsigned keep_len = search_buf_size - chunk_size;
2340 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2341 int nr_to_read;
2342
2343 /* Copy the trailing part of the previous iteration to the front
2344 of the buffer for the next iteration. */
2345 gdb_assert (keep_len == pattern_len - 1);
2346 memcpy (search_buf, search_buf + chunk_size, keep_len);
2347
2348 nr_to_read = min (search_space_len - keep_len, chunk_size);
2349
2350 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2351 search_buf + keep_len, read_addr,
2352 nr_to_read) != nr_to_read)
2353 {
2354 warning (_("Unable to access %s bytes of target "
2355 "memory at %s, halting search."),
2356 plongest (nr_to_read),
2357 hex_string (read_addr));
2358 do_cleanups (old_cleanups);
2359 return -1;
2360 }
2361
2362 start_addr += chunk_size;
2363 }
2364 }
2365
2366 /* Not found. */
2367
2368 do_cleanups (old_cleanups);
2369 return 0;
2370 }
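
/* Worked example of the chunking arithmetic above (numbers assumed for
   illustration): with SEARCH_CHUNK_SIZE of 16000 and a 5-byte pattern,
   search_buf_size is 16004, so keep_len is 4 (pattern_len - 1). After
   scanning bytes [start_addr, start_addr + 16000), the last 4 bytes of
   the buffer are copied to its front so a match straddling the chunk
   boundary is still found, and the next read refills the remaining
   16000 bytes starting at start_addr + 16004. */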
2371
2372 /* Default implementation of memory-searching. */
2373
2374 static int
2375 default_search_memory (struct target_ops *self,
2376 CORE_ADDR start_addr, ULONGEST search_space_len,
2377 const gdb_byte *pattern, ULONGEST pattern_len,
2378 CORE_ADDR *found_addrp)
2379 {
2380 /* Start over from the top of the target stack. */
2381 return simple_search_memory (current_target.beneath,
2382 start_addr, search_space_len,
2383 pattern, pattern_len, found_addrp);
2384 }
2385
2386 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2387 sequence of bytes in PATTERN with length PATTERN_LEN.
2388
2389 The result is 1 if found, 0 if not found, and -1 if there was an error
2390 requiring halting of the search (e.g. memory read error).
2391 If the pattern is found the address is recorded in FOUND_ADDRP. */
2392
2393 int
2394 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2395 const gdb_byte *pattern, ULONGEST pattern_len,
2396 CORE_ADDR *found_addrp)
2397 {
2398 return current_target.to_search_memory (&current_target, start_addr,
2399 search_space_len,
2400 pattern, pattern_len, found_addrp);
2401 }
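
/* Example (illustrative sketch): searching the first megabyte at an
   assumed address START for a 4-byte magic number.

     static const gdb_byte magic[] = { 0x7f, 'E', 'L', 'F' };
     CORE_ADDR found;

     if (target_search_memory (start, 0x100000,
                               magic, sizeof (magic), &found) == 1)
       printf_unfiltered ("found at %s\n", hex_string (found));
*/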
2402
2403 /* Look through the currently pushed targets. If none of them will
2404 be able to restart the currently running process, issue an error
2405 message. */
2406
2407 void
2408 target_require_runnable (void)
2409 {
2410 struct target_ops *t;
2411
2412 for (t = target_stack; t != NULL; t = t->beneath)
2413 {
2414 /* If this target knows how to create a new program, then
2415 assume we will still be able to after killing the current
2416 one. Either killing and mourning will not pop T, or else
2417 find_default_run_target will find it again. */
2418 if (t->to_create_inferior != NULL)
2419 return;
2420
2421 /* Do not worry about targets at certain strata that cannot
2422 create inferiors. Assume they will be pushed again if
2423 necessary, and continue to the process_stratum. */
2424 if (t->to_stratum == thread_stratum
2425 || t->to_stratum == record_stratum
2426 || t->to_stratum == arch_stratum)
2427 continue;
2428
2429 error (_("The \"%s\" target does not support \"run\". "
2430 "Try \"help target\" or \"continue\"."),
2431 t->to_shortname);
2432 }
2433
2434 /* This function is only called if the target is running. In that
2435 case there should have been a process_stratum target and it
2436 should either know how to create inferiors, or not... */
2437 internal_error (__FILE__, __LINE__, _("No targets found"));
2438 }
2439
2440 /* Whether GDB is allowed to fall back to the default run target for
2441 "run", "attach", etc. when no target is connected yet. */
2442 static int auto_connect_native_target = 1;
2443
2444 static void
2445 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2446 struct cmd_list_element *c, const char *value)
2447 {
2448 fprintf_filtered (file,
2449 _("Whether GDB may automatically connect to the "
2450 "native target is %s.\n"),
2451 value);
2452 }
2453
2454 /* Look through the list of possible targets for a target that can
2455 execute a run or attach command without any other data. This is
2456 used to locate the default process stratum.
2457
2458 If DO_MESG is not NULL, the result is always valid (error() is
2459 called for errors); else, return NULL on error. */
2460
2461 static struct target_ops *
2462 find_default_run_target (char *do_mesg)
2463 {
2464 struct target_ops *runnable = NULL;
2465
2466 if (auto_connect_native_target)
2467 {
2468 struct target_ops *t;
2469 int count = 0;
2470 int i;
2471
2472 for (i = 0; VEC_iterate (target_ops_p, target_structs, i, t); ++i)
2473 {
2474 if (t->to_can_run != delegate_can_run && target_can_run (t))
2475 {
2476 runnable = t;
2477 ++count;
2478 }
2479 }
2480
2481 if (count != 1)
2482 runnable = NULL;
2483 }
2484
2485 if (runnable == NULL)
2486 {
2487 if (do_mesg)
2488 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2489 else
2490 return NULL;
2491 }
2492
2493 return runnable;
2494 }
2495
2496 /* See target.h. */
2497
2498 struct target_ops *
2499 find_attach_target (void)
2500 {
2501 struct target_ops *t;
2502
2503 /* If a target on the current stack can attach, use it. */
2504 for (t = current_target.beneath; t != NULL; t = t->beneath)
2505 {
2506 if (t->to_attach != NULL)
2507 break;
2508 }
2509
2510 /* Otherwise, use the default run target for attaching. */
2511 if (t == NULL)
2512 t = find_default_run_target ("attach");
2513
2514 return t;
2515 }
2516
2517 /* See target.h. */
2518
2519 struct target_ops *
2520 find_run_target (void)
2521 {
2522 struct target_ops *t;
2523
2524 /* If a target on the current stack can create an inferior, use it. */
2525 for (t = current_target.beneath; t != NULL; t = t->beneath)
2526 {
2527 if (t->to_create_inferior != NULL)
2528 break;
2529 }
2530
2531 /* Otherwise, use the default run target. */
2532 if (t == NULL)
2533 t = find_default_run_target ("run");
2534
2535 return t;
2536 }
2537
2538 /* Implement the "info proc" command. */
2539
2540 int
2541 target_info_proc (const char *args, enum info_proc_what what)
2542 {
2543 struct target_ops *t;
2544
2545 /* If we're already connected to something that can get us OS
2546 related data, use it. Otherwise, try using the native
2547 target. */
2548 if (current_target.to_stratum >= process_stratum)
2549 t = current_target.beneath;
2550 else
2551 t = find_default_run_target (NULL);
2552
2553 for (; t != NULL; t = t->beneath)
2554 {
2555 if (t->to_info_proc != NULL)
2556 {
2557 t->to_info_proc (t, args, what);
2558
2559 if (targetdebug)
2560 fprintf_unfiltered (gdb_stdlog,
2561 "target_info_proc (\"%s\", %d)\n", args, what);
2562
2563 return 1;
2564 }
2565 }
2566
2567 return 0;
2568 }
2569
2570 static int
2571 find_default_supports_disable_randomization (struct target_ops *self)
2572 {
2573 struct target_ops *t;
2574
2575 t = find_default_run_target (NULL);
2576 if (t && t->to_supports_disable_randomization)
2577 return (t->to_supports_disable_randomization) (t);
2578 return 0;
2579 }
2580
2581 int
2582 target_supports_disable_randomization (void)
2583 {
2584 struct target_ops *t;
2585
2586 for (t = &current_target; t != NULL; t = t->beneath)
2587 if (t->to_supports_disable_randomization)
2588 return t->to_supports_disable_randomization (t);
2589
2590 return 0;
2591 }
2592
2593 char *
2594 target_get_osdata (const char *type)
2595 {
2596 struct target_ops *t;
2597
2598 /* If we're already connected to something that can get us OS
2599 related data, use it. Otherwise, try using the native
2600 target. */
2601 if (current_target.to_stratum >= process_stratum)
2602 t = current_target.beneath;
2603 else
2604 t = find_default_run_target ("get OS data");
2605
2606 if (!t)
2607 return NULL;
2608
2609 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2610 }
2611
2612 static struct address_space *
2613 default_thread_address_space (struct target_ops *self, ptid_t ptid)
2614 {
2615 struct inferior *inf;
2616
2617 /* Fall-back to the "main" address space of the inferior. */
2618 inf = find_inferior_pid (ptid_get_pid (ptid));
2619
2620 if (inf == NULL || inf->aspace == NULL)
2621 internal_error (__FILE__, __LINE__,
2622 _("Can't determine the current "
2623 "address space of thread %s\n"),
2624 target_pid_to_str (ptid));
2625
2626 return inf->aspace;
2627 }
2628
2629 /* Determine the current address space of thread PTID. */
2630
2631 struct address_space *
2632 target_thread_address_space (ptid_t ptid)
2633 {
2634 struct address_space *aspace;
2635
2636 aspace = current_target.to_thread_address_space (&current_target, ptid);
2637 gdb_assert (aspace != NULL);
2638
2639 return aspace;
2640 }
2641
2642
2643 /* Target file operations. */
2644
2645 static struct target_ops *
2646 default_fileio_target (void)
2647 {
2648 /* If we're already connected to something that can perform
2649 file I/O, use it. Otherwise, try using the native target. */
2650 if (current_target.to_stratum >= process_stratum)
2651 return current_target.beneath;
2652 else
2653 return find_default_run_target ("file I/O");
2654 }
2655
2656 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2657 target file descriptor, or -1 if an error occurs (and set
2658 *TARGET_ERRNO). */
2659 int
2660 target_fileio_open (const char *filename, int flags, int mode,
2661 int *target_errno)
2662 {
2663 struct target_ops *t;
2664
2665 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2666 {
2667 if (t->to_fileio_open != NULL)
2668 {
2669 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2670
2671 if (targetdebug)
2672 fprintf_unfiltered (gdb_stdlog,
2673 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2674 filename, flags, mode,
2675 fd, fd != -1 ? 0 : *target_errno);
2676 return fd;
2677 }
2678 }
2679
2680 *target_errno = FILEIO_ENOSYS;
2681 return -1;
2682 }
2683
2684 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2685 Return the number of bytes written, or -1 if an error occurs
2686 (and set *TARGET_ERRNO). */
2687 int
2688 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2689 ULONGEST offset, int *target_errno)
2690 {
2691 struct target_ops *t;
2692
2693 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2694 {
2695 if (t->to_fileio_pwrite != NULL)
2696 {
2697 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2698 target_errno);
2699
2700 if (targetdebug)
2701 fprintf_unfiltered (gdb_stdlog,
2702 "target_fileio_pwrite (%d,...,%d,%s) "
2703 "= %d (%d)\n",
2704 fd, len, pulongest (offset),
2705 ret, ret != -1 ? 0 : *target_errno);
2706 return ret;
2707 }
2708 }
2709
2710 *target_errno = FILEIO_ENOSYS;
2711 return -1;
2712 }
2713
2714 /* Read up to LEN bytes from FD on the target into READ_BUF.
2715 Return the number of bytes read, or -1 if an error occurs
2716 (and set *TARGET_ERRNO). */
2717 int
2718 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2719 ULONGEST offset, int *target_errno)
2720 {
2721 struct target_ops *t;
2722
2723 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2724 {
2725 if (t->to_fileio_pread != NULL)
2726 {
2727 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
2728 target_errno);
2729
2730 if (targetdebug)
2731 fprintf_unfiltered (gdb_stdlog,
2732 "target_fileio_pread (%d,...,%d,%s) "
2733 "= %d (%d)\n",
2734 fd, len, pulongest (offset),
2735 ret, ret != -1 ? 0 : *target_errno);
2736 return ret;
2737 }
2738 }
2739
2740 *target_errno = FILEIO_ENOSYS;
2741 return -1;
2742 }
2743
2744 /* Close FD on the target. Return 0, or -1 if an error occurs
2745 (and set *TARGET_ERRNO). */
2746 int
2747 target_fileio_close (int fd, int *target_errno)
2748 {
2749 struct target_ops *t;
2750
2751 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2752 {
2753 if (t->to_fileio_close != NULL)
2754 {
2755 int ret = t->to_fileio_close (t, fd, target_errno);
2756
2757 if (targetdebug)
2758 fprintf_unfiltered (gdb_stdlog,
2759 "target_fileio_close (%d) = %d (%d)\n",
2760 fd, ret, ret != -1 ? 0 : *target_errno);
2761 return ret;
2762 }
2763 }
2764
2765 *target_errno = FILEIO_ENOSYS;
2766 return -1;
2767 }
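
/* Example (illustrative sketch): the usual open/pread/close sequence over
   the target fileio layer. "/etc/hostname" is an assumed path.

     int target_errno;
     gdb_byte data[128];
     int fd = target_fileio_open ("/etc/hostname", FILEIO_O_RDONLY,
                                  0, &target_errno);

     if (fd != -1)
       {
         int n = target_fileio_pread (fd, data, sizeof (data), 0,
                                      &target_errno);

         target_fileio_close (fd, &target_errno);
       }
*/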
2768
2769 /* Unlink FILENAME on the target. Return 0, or -1 if an error
2770 occurs (and set *TARGET_ERRNO). */
2771 int
2772 target_fileio_unlink (const char *filename, int *target_errno)
2773 {
2774 struct target_ops *t;
2775
2776 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2777 {
2778 if (t->to_fileio_unlink != NULL)
2779 {
2780 int ret = t->to_fileio_unlink (t, filename, target_errno);
2781
2782 if (targetdebug)
2783 fprintf_unfiltered (gdb_stdlog,
2784 "target_fileio_unlink (%s) = %d (%d)\n",
2785 filename, ret, ret != -1 ? 0 : *target_errno);
2786 return ret;
2787 }
2788 }
2789
2790 *target_errno = FILEIO_ENOSYS;
2791 return -1;
2792 }
2793
2794 /* Read value of symbolic link FILENAME on the target. Return a
2795 null-terminated string allocated via xmalloc, or NULL if an error
2796 occurs (and set *TARGET_ERRNO). */
2797 char *
2798 target_fileio_readlink (const char *filename, int *target_errno)
2799 {
2800 struct target_ops *t;
2801
2802 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2803 {
2804 if (t->to_fileio_readlink != NULL)
2805 {
2806 char *ret = t->to_fileio_readlink (t, filename, target_errno);
2807
2808 if (targetdebug)
2809 fprintf_unfiltered (gdb_stdlog,
2810 "target_fileio_readlink (%s) = %s (%d)\n",
2811 filename, ret? ret : "(nil)",
2812 ret? 0 : *target_errno);
2813 return ret;
2814 }
2815 }
2816
2817 *target_errno = FILEIO_ENOSYS;
2818 return NULL;
2819 }
2820
2821 static void
2822 target_fileio_close_cleanup (void *opaque)
2823 {
2824 int fd = *(int *) opaque;
2825 int target_errno;
2826
2827 target_fileio_close (fd, &target_errno);
2828 }
2829
2830 /* Read target file FILENAME. Store the result in *BUF_P and
2831 return the size of the transferred data. PADDING additional bytes are
2832 available in *BUF_P. This is a helper function for
2833 target_fileio_read_alloc; see the declaration of that function for more
2834 information. */
2835
2836 static LONGEST
2837 target_fileio_read_alloc_1 (const char *filename,
2838 gdb_byte **buf_p, int padding)
2839 {
2840 struct cleanup *close_cleanup;
2841 size_t buf_alloc, buf_pos;
2842 gdb_byte *buf;
2843 LONGEST n;
2844 int fd;
2845 int target_errno;
2846
2847 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
2848 if (fd == -1)
2849 return -1;
2850
2851 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
2852
2853 /* Start by reading up to 4K at a time. The target will throttle
2854 this number down if necessary. */
2855 buf_alloc = 4096;
2856 buf = xmalloc (buf_alloc);
2857 buf_pos = 0;
2858 while (1)
2859 {
2860 n = target_fileio_pread (fd, &buf[buf_pos],
2861 buf_alloc - buf_pos - padding, buf_pos,
2862 &target_errno);
2863 if (n < 0)
2864 {
2865 /* An error occurred. */
2866 do_cleanups (close_cleanup);
2867 xfree (buf);
2868 return -1;
2869 }
2870 else if (n == 0)
2871 {
2872 /* Read all there was. */
2873 do_cleanups (close_cleanup);
2874 if (buf_pos == 0)
2875 xfree (buf);
2876 else
2877 *buf_p = buf;
2878 return buf_pos;
2879 }
2880
2881 buf_pos += n;
2882
2883 /* If the buffer is filling up, expand it. */
2884 if (buf_alloc < buf_pos * 2)
2885 {
2886 buf_alloc *= 2;
2887 buf = xrealloc (buf, buf_alloc);
2888 }
2889
2890 QUIT;
2891 }
2892 }
2893
2894 /* Read target file FILENAME. Store the result in *BUF_P and return
2895 the size of the transferred data. See the declaration in "target.h"
2896 for more information about the return value. */
2897
2898 LONGEST
2899 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
2900 {
2901 return target_fileio_read_alloc_1 (filename, buf_p, 0);
2902 }
2903
2904 /* Read target file FILENAME. The result is NUL-terminated and
2905 returned as a string, allocated using xmalloc. If an error occurs
2906 or the transfer is unsupported, NULL is returned. Empty objects
2907 are returned as allocated but empty strings. A warning is issued
2908 if the result contains any embedded NUL bytes. */
2909
2910 char *
2911 target_fileio_read_stralloc (const char *filename)
2912 {
2913 gdb_byte *buffer;
2914 char *bufstr;
2915 LONGEST i, transferred;
2916
2917 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
2918 bufstr = (char *) buffer;
2919
2920 if (transferred < 0)
2921 return NULL;
2922
2923 if (transferred == 0)
2924 return xstrdup ("");
2925
2926 bufstr[transferred] = 0;
2927
2928 /* Check for embedded NUL bytes; but allow trailing NULs. */
2929 for (i = strlen (bufstr); i < transferred; i++)
2930 if (bufstr[i] != 0)
2931 {
2932 warning (_("target file %s "
2933 "contained unexpected null characters"),
2934 filename);
2935 break;
2936 }
2937
2938 return bufstr;
2939 }
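
/* Example (illustrative sketch): slurping a whole target file as a
   NUL-terminated string; the path is assumed.

     char *contents = target_fileio_read_stralloc ("/proc/version");

     if (contents != NULL)
       {
         puts_filtered (contents);
         xfree (contents);
       }
*/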
2940
2941
2942 static int
2943 default_region_ok_for_hw_watchpoint (struct target_ops *self,
2944 CORE_ADDR addr, int len)
2945 {
2946 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
2947 }
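
/* For example, on a target with 64-bit pointers (gdbarch_ptr_bit of 64)
   and 8-bit target chars, the default above accepts regions of up to
   8 bytes for a hardware watchpoint. */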
2948
2949 static int
2950 default_watchpoint_addr_within_range (struct target_ops *target,
2951 CORE_ADDR addr,
2952 CORE_ADDR start, int length)
2953 {
2954 return addr >= start && addr < start + length;
2955 }
2956
2957 static struct gdbarch *
2958 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2959 {
2960 return target_gdbarch ();
2961 }
2962
2963 static int
2964 return_zero (struct target_ops *ignore)
2965 {
2966 return 0;
2967 }
2968
2969 static int
2970 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
2971 {
2972 return 0;
2973 }
2974
2975 /* Find the next target down the stack from the specified
2976 target. */
2978
2979 struct target_ops *
2980 find_target_beneath (struct target_ops *t)
2981 {
2982 return t->beneath;
2983 }
2984
2985 /* See target.h. */
2986
2987 struct target_ops *
2988 find_target_at (enum strata stratum)
2989 {
2990 struct target_ops *t;
2991
2992 for (t = current_target.beneath; t != NULL; t = t->beneath)
2993 if (t->to_stratum == stratum)
2994 return t;
2995
2996 return NULL;
2997 }
2998
2999 \f
3000 /* The inferior process has died. Long live the inferior! */
3001
3002 void
3003 generic_mourn_inferior (void)
3004 {
3005 ptid_t ptid;
3006
3007 ptid = inferior_ptid;
3008 inferior_ptid = null_ptid;
3009
3010 /* Mark breakpoints uninserted in case something tries to delete a
3011 breakpoint while we delete the inferior's threads (which would
3012 fail, since the inferior is long gone). */
3013 mark_breakpoints_out ();
3014
3015 if (!ptid_equal (ptid, null_ptid))
3016 {
3017 int pid = ptid_get_pid (ptid);
3018 exit_inferior (pid);
3019 }
3020
3021 /* Note this wipes step-resume breakpoints, so needs to be done
3022 after exit_inferior, which ends up referencing the step-resume
3023 breakpoints through clear_thread_inferior_resources. */
3024 breakpoint_init_inferior (inf_exited);
3025
3026 registers_changed ();
3027
3028 reopen_exec_file ();
3029 reinit_frame_cache ();
3030
3031 if (deprecated_detach_hook)
3032 deprecated_detach_hook ();
3033 }
3034 \f
3035 /* Convert a normal process ID to a string. Returns the string in a
3036 static buffer. */
3037
3038 char *
3039 normal_pid_to_str (ptid_t ptid)
3040 {
3041 static char buf[32];
3042
3043 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3044 return buf;
3045 }
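
/* For example, normal_pid_to_str (pid_to_ptid (42)) yields the string
   "process 42", valid only until the next call since the buffer is
   static. */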
3046
3047 static char *
3048 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3049 {
3050 return normal_pid_to_str (ptid);
3051 }
3052
3053 /* Error-catcher for target_find_memory_regions. */
3054 static int
3055 dummy_find_memory_regions (struct target_ops *self,
3056 find_memory_region_ftype ignore1, void *ignore2)
3057 {
3058 error (_("Command not implemented for this target."));
3059 return 0;
3060 }
3061
3062 /* Error-catcher for target_make_corefile_notes. */
3063 static char *
3064 dummy_make_corefile_notes (struct target_ops *self,
3065 bfd *ignore1, int *ignore2)
3066 {
3067 error (_("Command not implemented for this target."));
3068 return NULL;
3069 }
3070
3071 /* Set up the handful of non-empty slots needed by the dummy target
3072 vector. */
3073
3074 static void
3075 init_dummy_target (void)
3076 {
3077 dummy_target.to_shortname = "None";
3078 dummy_target.to_longname = "None";
3079 dummy_target.to_doc = "";
3080 dummy_target.to_supports_disable_randomization
3081 = find_default_supports_disable_randomization;
3082 dummy_target.to_stratum = dummy_stratum;
3083 dummy_target.to_has_all_memory = return_zero;
3084 dummy_target.to_has_memory = return_zero;
3085 dummy_target.to_has_stack = return_zero;
3086 dummy_target.to_has_registers = return_zero;
3087 dummy_target.to_has_execution = return_zero_has_execution;
3088 dummy_target.to_magic = OPS_MAGIC;
3089
3090 install_dummy_methods (&dummy_target);
3091 }
3092 \f
3093
3094 void
3095 target_close (struct target_ops *targ)
3096 {
3097 gdb_assert (!target_is_pushed (targ));
3098
3099 if (targ->to_xclose != NULL)
3100 targ->to_xclose (targ);
3101 else if (targ->to_close != NULL)
3102 targ->to_close (targ);
3103
3104 if (targetdebug)
3105 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3106 }
3107
3108 int
3109 target_thread_alive (ptid_t ptid)
3110 {
3111 return current_target.to_thread_alive (&current_target, ptid);
3112 }
3113
3114 void
3115 target_update_thread_list (void)
3116 {
3117 current_target.to_update_thread_list (&current_target);
3118 }
3119
3120 void
3121 target_stop (ptid_t ptid)
3122 {
3123 if (!may_stop)
3124 {
3125 warning (_("May not interrupt or stop the target, ignoring attempt"));
3126 return;
3127 }
3128
3129 (*current_target.to_stop) (&current_target, ptid);
3130 }
3131
3132 /* See target/target.h. */
3133
3134 void
3135 target_stop_and_wait (ptid_t ptid)
3136 {
3137 struct target_waitstatus status;
3138 int was_non_stop = non_stop;
3139
3140 non_stop = 1;
3141 target_stop (ptid);
3142
3143 memset (&status, 0, sizeof (status));
3144 target_wait (ptid, &status, 0);
3145
3146 non_stop = was_non_stop;
3147 }
3148
3149 /* See target/target.h. */
3150
3151 void
3152 target_continue_no_signal (ptid_t ptid)
3153 {
3154 target_resume (ptid, 0, GDB_SIGNAL_0);
3155 }
3156
3157 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3158 result. The incoming LIST argument is released. */
3159
3160 static char *
3161 str_comma_list_concat_elem (char *list, const char *elem)
3162 {
3163 if (list == NULL)
3164 return xstrdup (elem);
3165 else
3166 return reconcat (list, list, ", ", elem, (char *) NULL);
3167 }
3168
3169 /* Helper for target_options_to_string. If OPT is present in
3170 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3171 Returns the new resulting string. OPT is removed from
3172 TARGET_OPTIONS. */
3173
3174 static char *
3175 do_option (int *target_options, char *ret,
3176 int opt, char *opt_str)
3177 {
3178 if ((*target_options & opt) != 0)
3179 {
3180 ret = str_comma_list_concat_elem (ret, opt_str);
3181 *target_options &= ~opt;
3182 }
3183
3184 return ret;
3185 }
3186
3187 char *
3188 target_options_to_string (int target_options)
3189 {
3190 char *ret = NULL;
3191
3192 #define DO_TARG_OPTION(OPT) \
3193 ret = do_option (&target_options, ret, OPT, #OPT)
3194
3195 DO_TARG_OPTION (TARGET_WNOHANG);
3196
3197 if (target_options != 0)
3198 ret = str_comma_list_concat_elem (ret, "unknown???");
3199
3200 if (ret == NULL)
3201 ret = xstrdup ("");
3202 return ret;
3203 }
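
/* Example (illustrative): target_options_to_string (TARGET_WNOHANG)
   returns "TARGET_WNOHANG"; an argument with an unrecognized bit set,
   say TARGET_WNOHANG | 0x100 (0x100 assumed unused here), yields
   "TARGET_WNOHANG, unknown???". The caller owns the returned string
   and should xfree it. */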
3204
3205 static void
3206 debug_print_register (const char * func,
3207 struct regcache *regcache, int regno)
3208 {
3209 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3210
3211 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3212 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3213 && gdbarch_register_name (gdbarch, regno) != NULL
3214 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3215 fprintf_unfiltered (gdb_stdlog, "(%s)",
3216 gdbarch_register_name (gdbarch, regno));
3217 else
3218 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3219 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3220 {
3221 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3222 int i, size = register_size (gdbarch, regno);
3223 gdb_byte buf[MAX_REGISTER_SIZE];
3224
3225 regcache_raw_collect (regcache, regno, buf);
3226 fprintf_unfiltered (gdb_stdlog, " = ");
3227 for (i = 0; i < size; i++)
3228 {
3229 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3230 }
3231 if (size <= sizeof (LONGEST))
3232 {
3233 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3234
3235 fprintf_unfiltered (gdb_stdlog, " %s %s",
3236 core_addr_to_string_nz (val), plongest (val));
3237 }
3238 }
3239 fprintf_unfiltered (gdb_stdlog, "\n");
3240 }
3241
3242 void
3243 target_fetch_registers (struct regcache *regcache, int regno)
3244 {
3245 current_target.to_fetch_registers (&current_target, regcache, regno);
3246 if (targetdebug)
3247 debug_print_register ("target_fetch_registers", regcache, regno);
3248 }
3249
3250 void
3251 target_store_registers (struct regcache *regcache, int regno)
3252 {
3253 struct target_ops *t;
3254
3255 if (!may_write_registers)
3256 error (_("Writing to registers is not allowed (regno %d)"), regno);
3257
3258 current_target.to_store_registers (&current_target, regcache, regno);
3259 if (targetdebug)
3260 {
3261 debug_print_register ("target_store_registers", regcache, regno);
3262 }
3263 }
3264
3265 int
3266 target_core_of_thread (ptid_t ptid)
3267 {
3268 return current_target.to_core_of_thread (&current_target, ptid);
3269 }
3270
3271 int
3272 simple_verify_memory (struct target_ops *ops,
3273 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3274 {
3275 LONGEST total_xfered = 0;
3276
3277 while (total_xfered < size)
3278 {
3279 ULONGEST xfered_len;
3280 enum target_xfer_status status;
3281 gdb_byte buf[1024];
3282 ULONGEST howmuch = min (sizeof (buf), size - total_xfered);
3283
3284 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3285 buf, NULL, lma + total_xfered, howmuch,
3286 &xfered_len);
3287 if (status == TARGET_XFER_OK
3288 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3289 {
3290 total_xfered += xfered_len;
3291 QUIT;
3292 }
3293 else
3294 return 0;
3295 }
3296 return 1;
3297 }
3298
3299 /* Default implementation of memory verification. */
3300
3301 static int
3302 default_verify_memory (struct target_ops *self,
3303 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3304 {
3305 /* Start over from the top of the target stack. */
3306 return simple_verify_memory (current_target.beneath,
3307 data, memaddr, size);
3308 }
3309
3310 int
3311 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3312 {
3313 return current_target.to_verify_memory (&current_target,
3314 data, memaddr, size);
3315 }
3316
3317 /* The documentation for this function is in its prototype declaration in
3318 target.h. */
3319
3320 int
3321 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3322 {
3323 return current_target.to_insert_mask_watchpoint (&current_target,
3324 addr, mask, rw);
3325 }
3326
3327 /* The documentation for this function is in its prototype declaration in
3328 target.h. */
3329
3330 int
3331 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3332 {
3333 return current_target.to_remove_mask_watchpoint (&current_target,
3334 addr, mask, rw);
3335 }
3336
3337 /* The documentation for this function is in its prototype declaration
3338 in target.h. */
3339
3340 int
3341 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3342 {
3343 return current_target.to_masked_watch_num_registers (&current_target,
3344 addr, mask);
3345 }
3346
3347 /* The documentation for this function is in its prototype declaration
3348 in target.h. */
3349
3350 int
3351 target_ranged_break_num_registers (void)
3352 {
3353 return current_target.to_ranged_break_num_registers (&current_target);
3354 }
3355
3356 /* See target.h. */
3357
3358 struct btrace_target_info *
3359 target_enable_btrace (ptid_t ptid)
3360 {
3361 return current_target.to_enable_btrace (&current_target, ptid);
3362 }
3363
3364 /* See target.h. */
3365
3366 void
3367 target_disable_btrace (struct btrace_target_info *btinfo)
3368 {
3369 current_target.to_disable_btrace (&current_target, btinfo);
3370 }
3371
3372 /* See target.h. */
3373
3374 void
3375 target_teardown_btrace (struct btrace_target_info *btinfo)
3376 {
3377 current_target.to_teardown_btrace (&current_target, btinfo);
3378 }
3379
3380 /* See target.h. */
3381
3382 enum btrace_error
3383 target_read_btrace (VEC (btrace_block_s) **btrace,
3384 struct btrace_target_info *btinfo,
3385 enum btrace_read_type type)
3386 {
3387 return current_target.to_read_btrace (&current_target, btrace, btinfo, type);
3388 }
3389
3390 /* See target.h. */
3391
3392 void
3393 target_stop_recording (void)
3394 {
3395 current_target.to_stop_recording (&current_target);
3396 }
3397
3398 /* See target.h. */
3399
3400 void
3401 target_save_record (const char *filename)
3402 {
3403 current_target.to_save_record (&current_target, filename);
3404 }
3405
3406 /* See target.h. */
3407
3408 int
3409 target_supports_delete_record (void)
3410 {
3411 struct target_ops *t;
3412
3413 for (t = current_target.beneath; t != NULL; t = t->beneath)
3414 if (t->to_delete_record != delegate_delete_record
3415 && t->to_delete_record != tdefault_delete_record)
3416 return 1;
3417
3418 return 0;
3419 }
3420
3421 /* See target.h. */
3422
3423 void
3424 target_delete_record (void)
3425 {
3426 current_target.to_delete_record (&current_target);
3427 }
3428
3429 /* See target.h. */
3430
3431 int
3432 target_record_is_replaying (void)
3433 {
3434 return current_target.to_record_is_replaying (&current_target);
3435 }
3436
3437 /* See target.h. */
3438
3439 void
3440 target_goto_record_begin (void)
3441 {
3442 current_target.to_goto_record_begin (&current_target);
3443 }
3444
3445 /* See target.h. */
3446
3447 void
3448 target_goto_record_end (void)
3449 {
3450 current_target.to_goto_record_end (&current_target);
3451 }
3452
3453 /* See target.h. */
3454
3455 void
3456 target_goto_record (ULONGEST insn)
3457 {
3458 current_target.to_goto_record (&current_target, insn);
3459 }
3460
3461 /* See target.h. */
3462
3463 void
3464 target_insn_history (int size, int flags)
3465 {
3466 current_target.to_insn_history (&current_target, size, flags);
3467 }
3468
3469 /* See target.h. */
3470
3471 void
3472 target_insn_history_from (ULONGEST from, int size, int flags)
3473 {
3474 current_target.to_insn_history_from (&current_target, from, size, flags);
3475 }
3476
3477 /* See target.h. */
3478
3479 void
3480 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3481 {
3482 current_target.to_insn_history_range (&current_target, begin, end, flags);
3483 }
3484
3485 /* See target.h. */
3486
3487 void
3488 target_call_history (int size, int flags)
3489 {
3490 current_target.to_call_history (&current_target, size, flags);
3491 }
3492
3493 /* See target.h. */
3494
3495 void
3496 target_call_history_from (ULONGEST begin, int size, int flags)
3497 {
3498 current_target.to_call_history_from (&current_target, begin, size, flags);
3499 }
3500
3501 /* See target.h. */
3502
3503 void
3504 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3505 {
3506 current_target.to_call_history_range (&current_target, begin, end, flags);
3507 }
3508
3509 /* See target.h. */
3510
3511 const struct frame_unwind *
3512 target_get_unwinder (void)
3513 {
3514 return current_target.to_get_unwinder (&current_target);
3515 }
3516
3517 /* See target.h. */
3518
3519 const struct frame_unwind *
3520 target_get_tailcall_unwinder (void)
3521 {
3522 return current_target.to_get_tailcall_unwinder (&current_target);
3523 }
3524
3525 /* Default implementation of to_decr_pc_after_break. */
3526
3527 static CORE_ADDR
3528 default_target_decr_pc_after_break (struct target_ops *ops,
3529 struct gdbarch *gdbarch)
3530 {
3531 return gdbarch_decr_pc_after_break (gdbarch);
3532 }
3533
3534 /* See target.h. */
3535
3536 CORE_ADDR
3537 target_decr_pc_after_break (struct gdbarch *gdbarch)
3538 {
3539 return current_target.to_decr_pc_after_break (&current_target, gdbarch);
3540 }
3541
3542 /* See target.h. */
3543
3544 void
3545 target_prepare_to_generate_core (void)
3546 {
3547 current_target.to_prepare_to_generate_core (&current_target);
3548 }
3549
3550 /* See target.h. */
3551
3552 void
3553 target_done_generating_core (void)
3554 {
3555 current_target.to_done_generating_core (&current_target);
3556 }
3557
3558 static void
3559 setup_target_debug (void)
3560 {
3561 memcpy (&debug_target, &current_target, sizeof debug_target);
3562
3563 init_debug_target (&current_target);
3564 }
3565 \f
3566
3567 static char targ_desc[] =
3568 "Names of targets and files being debugged.\nShows the entire \
3569 stack of targets currently in use (including the exec-file,\n\
3570 core-file, and process, if any), as well as the symbol file name.";
3571
3572 static void
3573 default_rcmd (struct target_ops *self, const char *command,
3574 struct ui_file *output)
3575 {
3576 error (_("\"monitor\" command not supported by this target."));
3577 }
3578
3579 static void
3580 do_monitor_command (char *cmd,
3581 int from_tty)
3582 {
3583 target_rcmd (cmd, gdb_stdtarg);
3584 }
3585
3586 /* Print the name of each layer of our target stack. */
3587
3588 static void
3589 maintenance_print_target_stack (char *cmd, int from_tty)
3590 {
3591 struct target_ops *t;
3592
3593 printf_filtered (_("The current target stack is:\n"));
3594
3595 for (t = target_stack; t != NULL; t = t->beneath)
3596 {
3597 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
3598 }
3599 }
3600
3601 /* Controls whether targets can report that they can/are async. This is
3602 just for maintainers to use when debugging gdb. */
3603 int target_async_permitted = 1;
3604
3605 /* The set command writes to this variable. If the inferior is
3606 executing, target_async_permitted is *not* updated. */
3607 static int target_async_permitted_1 = 1;
3608
3609 static void
3610 maint_set_target_async_command (char *args, int from_tty,
3611 struct cmd_list_element *c)
3612 {
3613 if (have_live_inferiors ())
3614 {
3615 target_async_permitted_1 = target_async_permitted;
3616 error (_("Cannot change this setting while the inferior is running."));
3617 }
3618
3619 target_async_permitted = target_async_permitted_1;
3620 }
3621
3622 static void
3623 maint_show_target_async_command (struct ui_file *file, int from_tty,
3624 struct cmd_list_element *c,
3625 const char *value)
3626 {
3627 fprintf_filtered (file,
3628 _("Controlling the inferior in "
3629 "asynchronous mode is %s.\n"), value);
3630 }
3631
3632 /* Temporary copies of permission settings. */
3633
3634 static int may_write_registers_1 = 1;
3635 static int may_write_memory_1 = 1;
3636 static int may_insert_breakpoints_1 = 1;
3637 static int may_insert_tracepoints_1 = 1;
3638 static int may_insert_fast_tracepoints_1 = 1;
3639 static int may_stop_1 = 1;
3640
3641 /* Make the user-set values match the real values again. */
3642
3643 void
3644 update_target_permissions (void)
3645 {
3646 may_write_registers_1 = may_write_registers;
3647 may_write_memory_1 = may_write_memory;
3648 may_insert_breakpoints_1 = may_insert_breakpoints;
3649 may_insert_tracepoints_1 = may_insert_tracepoints;
3650 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3651 may_stop_1 = may_stop;
3652 }
3653
3654 /* A single function handles (most of) the permission flags in the same
3655 way. */
3656
3657 static void
3658 set_target_permissions (char *args, int from_tty,
3659 struct cmd_list_element *c)
3660 {
3661 if (target_has_execution)
3662 {
3663 update_target_permissions ();
3664 error (_("Cannot change this setting while the inferior is running."));
3665 }
3666
3667 /* Make the real values match the user-changed values. */
3668 may_write_registers = may_write_registers_1;
3669 may_insert_breakpoints = may_insert_breakpoints_1;
3670 may_insert_tracepoints = may_insert_tracepoints_1;
3671 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3672 may_stop = may_stop_1;
3673 update_observer_mode ();
3674 }
3675
3676 /* Set memory write permission independently of observer mode. */
3677
3678 static void
3679 set_write_memory_permission (char *args, int from_tty,
3680 struct cmd_list_element *c)
3681 {
3682 /* Make the real values match the user-changed values. */
3683 may_write_memory = may_write_memory_1;
3684 update_observer_mode ();
3685 }
3686
3687
3688 void
3689 initialize_targets (void)
3690 {
3691 init_dummy_target ();
3692 push_target (&dummy_target);
3693
3694 add_info ("target", target_info, targ_desc);
3695 add_info ("files", target_info, targ_desc);
3696
3697 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3698 Set target debugging."), _("\
3699 Show target debugging."), _("\
3700 When non-zero, target debugging is enabled. Higher numbers are more\n\
3701 verbose."),
3702 set_targetdebug,
3703 show_targetdebug,
3704 &setdebuglist, &showdebuglist);
3705
3706 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3707 &trust_readonly, _("\
3708 Set mode for reading from readonly sections."), _("\
3709 Show mode for reading from readonly sections."), _("\
3710 When this mode is on, memory reads from readonly sections (such as .text)\n\
3711 will be read from the object file instead of from the target. This will\n\
3712 result in significant performance improvement for remote targets."),
3713 NULL,
3714 show_trust_readonly,
3715 &setlist, &showlist);
3716
3717 add_com ("monitor", class_obscure, do_monitor_command,
3718 _("Send a command to the remote monitor (remote targets only)."));
3719
3720 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3721 _("Print the name of each layer of the internal target stack."),
3722 &maintenanceprintlist);
3723
3724 add_setshow_boolean_cmd ("target-async", no_class,
3725 &target_async_permitted_1, _("\
3726 Set whether gdb controls the inferior in asynchronous mode."), _("\
3727 Show whether gdb controls the inferior in asynchronous mode."), _("\
3728 Tells gdb whether to control the inferior in asynchronous mode."),
3729 maint_set_target_async_command,
3730 maint_show_target_async_command,
3731 &maintenance_set_cmdlist,
3732 &maintenance_show_cmdlist);
3733
3734 add_setshow_boolean_cmd ("may-write-registers", class_support,
3735 &may_write_registers_1, _("\
3736 Set permission to write into registers."), _("\
3737 Show permission to write into registers."), _("\
3738 When this permission is on, GDB may write into the target's registers.\n\
3739 Otherwise, any sort of write attempt will result in an error."),
3740 set_target_permissions, NULL,
3741 &setlist, &showlist);
3742
3743 add_setshow_boolean_cmd ("may-write-memory", class_support,
3744 &may_write_memory_1, _("\
3745 Set permission to write into target memory."), _("\
3746 Show permission to write into target memory."), _("\
3747 When this permission is on, GDB may write into the target's memory.\n\
3748 Otherwise, any sort of write attempt will result in an error."),
3749 set_write_memory_permission, NULL,
3750 &setlist, &showlist);
3751
3752 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
3753 &may_insert_breakpoints_1, _("\
3754 Set permission to insert breakpoints in the target."), _("\
3755 Show permission to insert breakpoints in the target."), _("\
3756 When this permission is on, GDB may insert breakpoints in the program.\n\
3757 Otherwise, any sort of insertion attempt will result in an error."),
3758 set_target_permissions, NULL,
3759 &setlist, &showlist);
3760
3761 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
3762 &may_insert_tracepoints_1, _("\
3763 Set permission to insert tracepoints in the target."), _("\
3764 Show permission to insert tracepoints in the target."), _("\
3765 When this permission is on, GDB may insert tracepoints in the program.\n\
3766 Otherwise, any sort of insertion attempt will result in an error."),
3767 set_target_permissions, NULL,
3768 &setlist, &showlist);
3769
3770 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
3771 &may_insert_fast_tracepoints_1, _("\
3772 Set permission to insert fast tracepoints in the target."), _("\
3773 Show permission to insert fast tracepoints in the target."), _("\
3774 When this permission is on, GDB may insert fast tracepoints.\n\
3775 Otherwise, any sort of insertion attempt will result in an error."),
3776 set_target_permissions, NULL,
3777 &setlist, &showlist);
3778
3779 add_setshow_boolean_cmd ("may-interrupt", class_support,
3780 &may_stop_1, _("\
3781 Set permission to interrupt or signal the target."), _("\
3782 Show permission to interrupt or signal the target."), _("\
3783 When this permission is on, GDB may interrupt/stop the target's execution.\n\
3784 Otherwise, any attempt to interrupt or stop will be ignored."),
3785 set_target_permissions, NULL,
3786 &setlist, &showlist);
3787
3788 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
3789 &auto_connect_native_target, _("\
3790 Set whether GDB may automatically connect to the native target."), _("\
3791 Show whether GDB may automatically connect to the native target."), _("\
3792 When on, and GDB is not connected to a target yet, GDB\n\
3793 attempts \"run\" and other commands with the native target."),
3794 NULL, show_auto_connect_native_target,
3795 &setlist, &showlist);
3796 }