Convert default_child_has_foo functions to process_stratum_target methods
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2018 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "byte-vector.h"
50 #include "terminal.h"
51 #include <algorithm>
52 #include <unordered_map>
53
54 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
55
56 static void default_terminal_info (struct target_ops *, const char *, int);
57
58 static int default_watchpoint_addr_within_range (struct target_ops *,
59 CORE_ADDR, CORE_ADDR, int);
60
61 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
62 CORE_ADDR, int);
63
64 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
65
66 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
67 long lwp, long tid);
68
69 static int default_follow_fork (struct target_ops *self, int follow_child,
70 int detach_fork);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static void tcomplain (void) ATTRIBUTE_NORETURN;
86
87 static struct target_ops *find_default_run_target (const char *);
88
89 static int dummy_find_memory_regions (struct target_ops *self,
90 find_memory_region_ftype ignore1,
91 void *ignore2);
92
93 static char *dummy_make_corefile_notes (struct target_ops *self,
94 bfd *ignore1, int *ignore2);
95
96 static const char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 /* Mapping between target_info objects (which have address identity)
102 and corresponding open/factory function/callback. Each add_target
103 call adds one entry to this map, and registers a "target
104 TARGET_NAME" command that when invoked calls the factory registered
105 here. The target_info object is associated with the command via
106 the command's context. */
107 static std::unordered_map<const target_info *, target_open_ftype *>
108 target_factories;
109
110 /* The initial current target, so that there is always a semi-valid
111 current target. */
112
113 static struct target_ops *the_dummy_target;
114 static struct target_ops *the_debug_target;
115
116 /* The target stack. */
117
118 static target_stack g_target_stack;
119
120 /* Top of target stack. */
121 /* The target structure we are currently using to talk to a process
122 or file or whatever "inferior" we have. */
123
124 target_ops *
125 current_top_target ()
126 {
127 return g_target_stack.top ();
128 }
129
130 /* Command list for target. */
131
132 static struct cmd_list_element *targetlist = NULL;
133
134 /* Nonzero if we should trust readonly sections from the
135 executable when reading memory. */
136
137 static int trust_readonly = 0;
138
139 /* Nonzero if we should show true memory content including
140 memory breakpoint inserted by gdb. */
141
142 static int show_memory_breakpoints = 0;
143
144 /* These globals control whether GDB attempts to perform these
145 operations; they are useful for targets that need to prevent
146 inadvertant disruption, such as in non-stop mode. */
147
148 int may_write_registers = 1;
149
150 int may_write_memory = 1;
151
152 int may_insert_breakpoints = 1;
153
154 int may_insert_tracepoints = 1;
155
156 int may_insert_fast_tracepoints = 1;
157
158 int may_stop = 1;
159
160 /* Non-zero if we want to see trace of target level stuff. */
161
162 static unsigned int targetdebug = 0;
163
164 static void
165 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
166 {
167 if (targetdebug)
168 push_target (the_debug_target);
169 else
170 unpush_target (the_debug_target);
171 }
172
173 static void
174 show_targetdebug (struct ui_file *file, int from_tty,
175 struct cmd_list_element *c, const char *value)
176 {
177 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
178 }
179
180 /* The user just typed 'target' without the name of a target. */
181
182 static void
183 target_command (const char *arg, int from_tty)
184 {
185 fputs_filtered ("Argument required (target name). Try `help target'\n",
186 gdb_stdout);
187 }
188
189 int
190 target_has_all_memory_1 (void)
191 {
192 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
193 if (t->has_all_memory ())
194 return 1;
195
196 return 0;
197 }
198
199 int
200 target_has_memory_1 (void)
201 {
202 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
203 if (t->has_memory ())
204 return 1;
205
206 return 0;
207 }
208
209 int
210 target_has_stack_1 (void)
211 {
212 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
213 if (t->has_stack ())
214 return 1;
215
216 return 0;
217 }
218
219 int
220 target_has_registers_1 (void)
221 {
222 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
223 if (t->has_registers ())
224 return 1;
225
226 return 0;
227 }
228
229 int
230 target_has_execution_1 (ptid_t the_ptid)
231 {
232 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
233 if (t->has_execution (the_ptid))
234 return 1;
235
236 return 0;
237 }
238
239 int
240 target_has_execution_current (void)
241 {
242 return target_has_execution_1 (inferior_ptid);
243 }
244
245 /* This is used to implement the various target commands. */
246
247 static void
248 open_target (const char *args, int from_tty, struct cmd_list_element *command)
249 {
250 auto *ti = static_cast<target_info *> (get_cmd_context (command));
251 target_open_ftype *func = target_factories[ti];
252
253 if (targetdebug)
254 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
255 ti->shortname);
256
257 func (args, from_tty);
258
259 if (targetdebug)
260 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
261 ti->shortname, args, from_tty);
262 }
263
264 /* See target.h. */
265
void
add_target (const target_info &t, target_open_ftype *func,
	    completer_ftype *completer)
{
  struct cmd_list_element *c;

  /* Register the factory; a target_info may only be added once.  */
  auto &func_slot = target_factories[&t];
  if (func_slot != nullptr)
    internal_error (__FILE__, __LINE__,
		    _("target already added (\"%s\")."), t.shortname);
  func_slot = func;

  /* Lazily create the "target" prefix command the first time a target
     is registered.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol.  For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  /* "target TARGET_NAME" invokes open_target, which recovers T from
     the command's context.  */
  c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
  set_cmd_context (c, (void *) &t);
  set_cmd_sfunc (c, open_target);
  if (completer != NULL)
    set_cmd_completer (c, completer);
}
292
293 /* See target.h. */
294
void
add_deprecated_target_alias (const target_info &tinfo, const char *alias)
{
  struct cmd_list_element *c;
  char *alt;

  /* If we use add_alias_cmd, here, we do not get the deprecated warning,
     see PR cli/15104.  */
  c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
  set_cmd_sfunc (c, open_target);
  set_cmd_context (c, (void *) &tinfo);
  /* NOTE(review): ALT is xstrprintf'd (heap-allocated) and handed to
     deprecate_cmd without a visible free here — presumably deprecate_cmd
     keeps the pointer; confirm ownership against cli/cli-decode.c.  */
  alt = xstrprintf ("target %s", tinfo.shortname);
  deprecate_cmd (c, alt);
}
309
310 /* Stub functions */
311
312 void
313 target_kill (void)
314 {
315 current_top_target ()->kill ();
316 }
317
318 void
319 target_load (const char *arg, int from_tty)
320 {
321 target_dcache_invalidate ();
322 current_top_target ()->load (arg, from_tty);
323 }
324
325 /* Define it. */
326
327 target_terminal_state target_terminal::m_terminal_state
328 = target_terminal_state::is_ours;
329
330 /* See target/target.h. */
331
332 void
333 target_terminal::init (void)
334 {
335 current_top_target ()->terminal_init ();
336
337 m_terminal_state = target_terminal_state::is_ours;
338 }
339
340 /* See target/target.h. */
341
void
target_terminal::inferior (void)
{
  struct ui *ui = current_ui;

  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  */
  if (ui->prompt_state != PROMPT_BLOCKED)
    return;

  /* Since we always run the inferior in the main console (unless "set
     inferior-tty" is in effect), when some UI other than the main one
     calls target_terminal::inferior, then we leave the main UI's
     terminal settings as is.  */
  if (ui != main_ui)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */

  struct inferior *inf = current_inferior ();

  /* Only switch if the inferior isn't already in the foreground; keep
     the per-inferior state in sync with the actual terminal modes.  */
  if (inf->terminal_state != target_terminal_state::is_inferior)
    {
      current_top_target ()->terminal_inferior ();
      inf->terminal_state = target_terminal_state::is_inferior;
    }

  /* Record the global state regardless of which branch was taken.  */
  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
377
378 /* See target/target.h. */
379
void
target_terminal::restore_inferior (void)
{
  struct ui *ui = current_ui;

  /* See target_terminal::inferior().  */
  if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
    return;

  /* Restore the terminal settings of inferiors that were in the
     foreground but are now ours_for_output due to a temporary
     target_target::ours_for_output() call.  */

  {
    /* terminal_inferior acts on the current inferior, so temporarily
       switch to each affected one; the scoped restore puts the original
       back on exit.  */
    scoped_restore_current_inferior restore_inferior;

    for (struct inferior *inf : all_inferiors ())
      {
	if (inf->terminal_state == target_terminal_state::is_ours_for_output)
	  {
	    set_current_inferior (inf);
	    current_top_target ()->terminal_inferior ();
	    inf->terminal_state = target_terminal_state::is_inferior;
	  }
      }
  }

  m_terminal_state = target_terminal_state::is_inferior;

  /* If the user hit C-c before, pretend that it was hit right
     here.  */
  if (check_quit_flag ())
    target_pass_ctrlc ();
}
414
415 /* Switch terminal state to DESIRED_STATE, either is_ours, or
416 is_ours_for_output. */
417
static void
target_terminal_is_ours_kind (target_terminal_state desired_state)
{
  scoped_restore_current_inferior restore_inferior;

  /* Must do this in two passes.  First, have all inferiors save the
     current terminal settings.  Then, after all inferiors have add a
     chance to safely save the terminal settings, restore GDB's
     terminal settings.  */

  /* Pass 1: save each foreground inferior's terminal settings.  */
  for (inferior *inf : all_inferiors ())
    {
      if (inf->terminal_state == target_terminal_state::is_inferior)
	{
	  set_current_inferior (inf);
	  current_top_target ()->terminal_save_inferior ();
	}
    }

  /* Pass 2: apply DESIRED_STATE to every inferior not already there.  */
  for (inferior *inf : all_inferiors ())
    {
      /* Note we don't check is_inferior here like above because we
	 need to handle 'is_ours_for_output -> is_ours' too.  Careful
	 to never transition from 'is_ours' to 'is_ours_for_output',
	 though.  */
      if (inf->terminal_state != target_terminal_state::is_ours
	  && inf->terminal_state != desired_state)
	{
	  set_current_inferior (inf);
	  if (desired_state == target_terminal_state::is_ours)
	    current_top_target ()->terminal_ours ();
	  else if (desired_state == target_terminal_state::is_ours_for_output)
	    current_top_target ()->terminal_ours_for_output ();
	  else
	    gdb_assert_not_reached ("unhandled desired state");
	  inf->terminal_state = desired_state;
	}
    }
}
457
458 /* See target/target.h. */
459
460 void
461 target_terminal::ours ()
462 {
463 struct ui *ui = current_ui;
464
465 /* See target_terminal::inferior. */
466 if (ui != main_ui)
467 return;
468
469 if (m_terminal_state == target_terminal_state::is_ours)
470 return;
471
472 target_terminal_is_ours_kind (target_terminal_state::is_ours);
473 m_terminal_state = target_terminal_state::is_ours;
474 }
475
476 /* See target/target.h. */
477
478 void
479 target_terminal::ours_for_output ()
480 {
481 struct ui *ui = current_ui;
482
483 /* See target_terminal::inferior. */
484 if (ui != main_ui)
485 return;
486
487 if (!target_terminal::is_inferior ())
488 return;
489
490 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
491 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
492 }
493
494 /* See target/target.h. */
495
496 void
497 target_terminal::info (const char *arg, int from_tty)
498 {
499 current_top_target ()->terminal_info (arg, from_tty);
500 }
501
502 /* See target.h. */
503
504 bool
505 target_supports_terminal_ours (void)
506 {
507 /* This can be called before there is any target, so we must check
508 for nullptr here. */
509 target_ops *top = current_top_target ();
510
511 if (top == nullptr)
512 return false;
513 return top->supports_terminal_ours ();
514 }
515
516 static void
517 tcomplain (void)
518 {
519 error (_("You can't do that when your target is `%s'"),
520 current_top_target ()->shortname ());
521 }
522
523 void
524 noprocess (void)
525 {
526 error (_("You can't do that without a process to debug."));
527 }
528
529 static void
530 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
531 {
532 printf_unfiltered (_("No saved terminal information.\n"));
533 }
534
535 /* A default implementation for the to_get_ada_task_ptid target method.
536
537 This function builds the PTID by using both LWP and TID as part of
538 the PTID lwp and tid elements. The pid used is the pid of the
539 inferior_ptid. */
540
541 static ptid_t
542 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
543 {
544 return ptid_t (inferior_ptid.pid (), lwp, tid);
545 }
546
547 static enum exec_direction_kind
548 default_execution_direction (struct target_ops *self)
549 {
550 if (!target_can_execute_reverse)
551 return EXEC_FORWARD;
552 else if (!target_can_async_p ())
553 return EXEC_FORWARD;
554 else
555 gdb_assert_not_reached ("\
556 to_execution_direction must be implemented for reverse async");
557 }
558
559 /* See target.h. */
560
void
target_stack::push (target_ops *t)
{
  /* If there's already a target at this stratum, remove it.  */
  if (m_stack[t->to_stratum] != NULL)
    {
      /* Clear the slot before closing, so target_close doesn't see a
	 stale entry for PREV on the stack.  */
      target_ops *prev = m_stack[t->to_stratum];
      m_stack[t->to_stratum] = NULL;
      target_close (prev);
    }

  /* Now add the new one.  */
  m_stack[t->to_stratum] = t;

  /* Keep m_top pointing at the highest occupied stratum.  */
  if (m_top < t->to_stratum)
    m_top = t->to_stratum;
}
578
579 /* See target.h. */
580
581 void
582 push_target (struct target_ops *t)
583 {
584 g_target_stack.push (t);
585 }
586
587 /* See target.h. */
588
589 int
590 unpush_target (struct target_ops *t)
591 {
592 return g_target_stack.unpush (t);
593 }
594
595 /* See target.h. */
596
bool
target_stack::unpush (target_ops *t)
{
  /* The dummy target is the permanent bottom of the stack.  */
  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  gdb_assert (t != NULL);

  /* Look for the specified target.  Note that a target can only occur
     once in the target stack.  */

  if (m_stack[t->to_stratum] != t)
    {
      /* If T wasn't pushed, quit.  Only open targets should be
	 closed.  */
      return false;
    }

  /* Unchain the target.  */
  m_stack[t->to_stratum] = NULL;

  /* If T was the top, the new top is whatever is beneath it.  */
  if (m_top == t->to_stratum)
    m_top = t->beneath ()->to_stratum;

  /* Finally close the target.  Note we do this after unchaining, so
     any target method calls from within the target_close
     implementation don't end up in T anymore.  */
  target_close (t);

  return true;
}
629
630 /* Unpush TARGET and assert that it worked. */
631
632 static void
633 unpush_target_and_assert (struct target_ops *target)
634 {
635 if (!unpush_target (target))
636 {
637 fprintf_unfiltered (gdb_stderr,
638 "pop_all_targets couldn't find target %s\n",
639 target->shortname ());
640 internal_error (__FILE__, __LINE__,
641 _("failed internal consistency check"));
642 }
643 }
644
645 void
646 pop_all_targets_above (enum strata above_stratum)
647 {
648 while ((int) (current_top_target ()->to_stratum) > (int) above_stratum)
649 unpush_target_and_assert (current_top_target ());
650 }
651
652 /* See target.h. */
653
654 void
655 pop_all_targets_at_and_above (enum strata stratum)
656 {
657 while ((int) (current_top_target ()->to_stratum) >= (int) stratum)
658 unpush_target_and_assert (current_top_target ());
659 }
660
661 void
662 pop_all_targets (void)
663 {
664 pop_all_targets_above (dummy_stratum);
665 }
666
667 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
668
669 int
670 target_is_pushed (struct target_ops *t)
671 {
672 return g_target_stack.is_pushed (t);
673 }
674
675 /* Default implementation of to_get_thread_local_address. */
676
677 static void
678 generic_tls_error (void)
679 {
680 throw_error (TLS_GENERIC_ERROR,
681 _("Cannot find thread-local variables on this target"));
682 }
683
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.  Throws
   on any failure; all TLS-specific errors are turned into
   user-readable messages here.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* NOTE(review): volatile presumably because TRY/CATCH is
     setjmp/longjmp-based and ADDR must survive the non-local exit —
     confirm against the TRY/CATCH implementation.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target = current_top_target ();

  /* The architecture must know how to locate a load module address,
     otherwise TLS lookup cannot proceed at all.  */
  if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
    {
      ptid_t ptid = inferior_ptid;

      TRY
	{
	  CORE_ADDR lm_addr;

	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
							   objfile);

	  addr = target->get_thread_local_address (ptid, lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
	 throw the error to some higher catcher.  */
      CATCH (ex, RETURN_MASK_ALL)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      else
		error (_("Cannot find executable file `%s' in dynamic"
			 " linker's load module list"), objfile_name (objfile));
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the shared library `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
			 " thread-local variables in\n"
			 "the executable `%s'\n"
			 "for %s"),
		       objfile_name (objfile), target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
			 "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
			 "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile_name (objfile), ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error: rethrow for a higher catcher.  */
	      throw_exception (ex);
	      break;
	    }
	}
      END_CATCH
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
766
767 const char *
768 target_xfer_status_to_string (enum target_xfer_status status)
769 {
770 #define CASE(X) case X: return #X
771 switch (status)
772 {
773 CASE(TARGET_XFER_E_IO);
774 CASE(TARGET_XFER_UNAVAILABLE);
775 default:
776 return "<unknown>";
777 }
778 #undef CASE
779 };
780
781
782 #undef MIN
783 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
784
785 /* target_read_string -- read a null terminated string, up to LEN bytes,
786 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
787 Set *STRING to a pointer to malloc'd memory containing the data; the caller
788 is responsible for freeing it. Return the number of bytes successfully
789 read. */
790
int
target_read_string (CORE_ADDR memaddr, gdb::unique_xmalloc_ptr<char> *string,
		    int len, int *errnop)
{
  int tlen, offset, i;
  gdb_byte buf[4];
  int errcode = 0;
  char *buffer;
  int buffer_allocated;
  char *bufptr;
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = (char *) xmalloc (buffer_allocated);
  bufptr = buffer;

  while (len > 0)
    {
      /* Fetch a 4-byte-aligned chunk; TLEN is how many of the fetched
	 bytes belong to the string, OFFSET where they start in BUF.  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer geometrically when full.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = (char *) xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes, stopping at the first NUL terminator.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* Hand ownership of BUFFER to the caller even on error, so partial
     contents are available along with the error code in *ERRNOP.  */
  string->reset (buffer);
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
858
859 struct target_section_table *
860 target_get_section_table (struct target_ops *target)
861 {
862 return target->get_section_table ();
863 }
864
865 /* Find a section containing ADDR. */
866
867 struct target_section *
868 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
869 {
870 struct target_section_table *table = target_get_section_table (target);
871 struct target_section *secp;
872
873 if (table == NULL)
874 return NULL;
875
876 for (secp = table->sections; secp < table->sections_end; secp++)
877 {
878 if (addr >= secp->addr && addr < secp->endaddr)
879 return secp;
880 }
881 return NULL;
882 }
883
884
885 /* Helper for the memory xfer routines. Checks the attributes of the
886 memory region of MEMADDR against the read or write being attempted.
887 If the access is permitted returns true, otherwise returns false.
888 REGION_P is an optional output parameter. If not-NULL, it is
889 filled with a pointer to the memory region of MEMADDR. REG_LEN
890 returns LEN trimmed to the end of the region. This is how much the
891 caller can continue requesting, if the access is permitted. A
892 single xfer request must not straddle memory region boundaries. */
893
static int
memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
			  ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
			  struct mem_region **region_p)
{
  struct mem_region *region;

  region = lookup_mem_region (memaddr);

  if (region_p != NULL)
    *region_p = region;

  switch (region->attrib.mode)
    {
    case MEM_RO:
      /* Read-only region: reject writes.  */
      if (writebuf != NULL)
	return 0;
      break;

    case MEM_WO:
      /* Write-only region: reject reads.  */
      if (readbuf != NULL)
	return 0;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      /* Inaccessible region: reject everything.  */
      return 0;
    }

  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    *reg_len = len;
  else
    /* Clamp the access so it does not straddle the region boundary.  */
    *reg_len = region->hi - memaddr;

  return 1;
}
936
937 /* Read memory from more than one valid target. A core file, for
938 instance, could have some of memory but delegate other bits to
939 the target below it. So, we must manually try all targets. */
940
enum target_xfer_status
raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
			 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
			 ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Walk down the target stack until some target satisfies the
     request or we run out of targets.  */
  do
    {
      res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
			       readbuf, writebuf, memaddr, len,
			       xfered_len);
      if (res == TARGET_XFER_OK)
	break;

      /* Stop if the target reports that the memory is not available.  */
      if (res == TARGET_XFER_UNAVAILABLE)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->has_all_memory ())
	break;

      ops = ops->beneath ();
    }
  while (ops != NULL);

  /* The cache works at the raw memory level.  Make sure the cache
     gets updated with raw contents no matter what kind of memory
     object was originally being written.  Note we do write-through
     first, so that if it fails, we don't write to the cache contents
     that never made it to the target.  */
  if (writebuf != NULL
      && inferior_ptid != null_ptid
      && target_dcache_init_p ()
      && (stack_cache_enabled_p () || code_cache_enabled_p ()))
    {
      DCACHE *dcache = target_dcache_get ();

      /* Note that writing to an area of memory which wasn't present
	 in the cache doesn't cause it to be loaded in.  */
      dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
    }

  return res;
}
988
989 /* Perform a partial memory transfer.
990 For docs see target.h, to_xfer_partial. */
991
static enum target_xfer_status
memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
		       gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
		       ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;
  ULONGEST reg_len;
  struct mem_region *region;
  struct inferior *inf;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  /* Translate to the mapped address before reading from the
	     section table.  */
	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->the_bfd_section->owner,
				     secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  /* Read-only section: the on-disk contents are as good as
	     the target's.  */
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len, xfered_len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* Try GDB's internal data cache.  */

  /* Check permissions first; REG_LEN is LEN clamped to the memory
     region's end.  */
  if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
				 &region))
    return TARGET_XFER_E_IO;

  if (inferior_ptid != null_ptid)
    inf = current_inferior ();
  else
    inf = NULL;

  if (inf != NULL
      && readbuf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
	  || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
    {
      DCACHE *dcache = target_dcache_get_or_init ();

      return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
					 reg_len, xfered_len);
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
				 xfered_len);

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1088
1089 /* Perform a partial memory transfer. For docs see target.h,
1090 to_xfer_partial. */
1091
static enum target_xfer_status
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     gdb_byte *readbuf, const gdb_byte *writebuf,
		     ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
{
  enum target_xfer_status res;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return TARGET_XFER_EOF;

  /* Strip address bits the architecture does not consider
     significant (e.g. tag bits).  */
  memaddr = address_significant (target_gdbarch (), memaddr);

  /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
     breakpoint insns, thus hiding out from higher layers whether
     there are software breakpoints inserted in the code stream.  */
  if (readbuf != NULL)
    {
      res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
				   xfered_len);

      if (res == TARGET_XFER_OK && !show_memory_breakpoints)
	breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
    }
  else
    {
      /* A large write request is likely to be partially satisfied
	 by memory_xfer_partial_1.  We will continually malloc
	 and free a copy of the entire write request for breakpoint
	 shadow handling even though we only end up writing a small
	 subset of it.  Cap writes to a limit specified by the target
	 to mitigate this.  */
      len = std::min (ops->get_memory_xfer_limit (), len);

      /* Copy WRITEBUF so breakpoint insns can be spliced in without
	 modifying the caller's buffer.  */
      gdb::byte_vector buf (writebuf, writebuf + len);
      breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
      res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
				   xfered_len);
    }

  return res;
}
1134
/* Temporarily set show_memory_breakpoints to SHOW; the previous value
   is restored automatically when the returned object goes out of
   scope.  */

scoped_restore_tmpl<int>
make_scoped_restore_show_memory_breakpoints (int show)
{
  return make_scoped_restore (&show_memory_breakpoints, show);
}
1140
1141 /* For docs see target.h, to_xfer_partial. */
1142
1143 enum target_xfer_status
1144 target_xfer_partial (struct target_ops *ops,
1145 enum target_object object, const char *annex,
1146 gdb_byte *readbuf, const gdb_byte *writebuf,
1147 ULONGEST offset, ULONGEST len,
1148 ULONGEST *xfered_len)
1149 {
1150 enum target_xfer_status retval;
1151
1152 /* Transfer is done when LEN is zero. */
1153 if (len == 0)
1154 return TARGET_XFER_EOF;
1155
1156 if (writebuf && !may_write_memory)
1157 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1158 core_addr_to_string_nz (offset), plongest (len));
1159
1160 *xfered_len = 0;
1161
1162 /* If this is a memory transfer, let the memory-specific code
1163 have a look at it instead. Memory transfers are more
1164 complicated. */
1165 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1166 || object == TARGET_OBJECT_CODE_MEMORY)
1167 retval = memory_xfer_partial (ops, object, readbuf,
1168 writebuf, offset, len, xfered_len);
1169 else if (object == TARGET_OBJECT_RAW_MEMORY)
1170 {
1171 /* Skip/avoid accessing the target if the memory region
1172 attributes block the access. Check this here instead of in
1173 raw_memory_xfer_partial as otherwise we'd end up checking
1174 this twice in the case of the memory_xfer_partial path is
1175 taken; once before checking the dcache, and another in the
1176 tail call to raw_memory_xfer_partial. */
1177 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1178 NULL))
1179 return TARGET_XFER_E_IO;
1180
1181 /* Request the normal memory object from other layers. */
1182 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1183 xfered_len);
1184 }
1185 else
1186 retval = ops->xfer_partial (object, annex, readbuf,
1187 writebuf, offset, len, xfered_len);
1188
1189 if (targetdebug)
1190 {
1191 const unsigned char *myaddr = NULL;
1192
1193 fprintf_unfiltered (gdb_stdlog,
1194 "%s:target_xfer_partial "
1195 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1196 ops->shortname (),
1197 (int) object,
1198 (annex ? annex : "(null)"),
1199 host_address_to_string (readbuf),
1200 host_address_to_string (writebuf),
1201 core_addr_to_string_nz (offset),
1202 pulongest (len), retval,
1203 pulongest (*xfered_len));
1204
1205 if (readbuf)
1206 myaddr = readbuf;
1207 if (writebuf)
1208 myaddr = writebuf;
1209 if (retval == TARGET_XFER_OK && myaddr != NULL)
1210 {
1211 int i;
1212
1213 fputs_unfiltered (", bytes =", gdb_stdlog);
1214 for (i = 0; i < *xfered_len; i++)
1215 {
1216 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1217 {
1218 if (targetdebug < 2 && i > 0)
1219 {
1220 fprintf_unfiltered (gdb_stdlog, " ...");
1221 break;
1222 }
1223 fprintf_unfiltered (gdb_stdlog, "\n");
1224 }
1225
1226 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1227 }
1228 }
1229
1230 fputc_unfiltered ('\n', gdb_stdlog);
1231 }
1232
1233 /* Check implementations of to_xfer_partial update *XFERED_LEN
1234 properly. Do assertion after printing debug messages, so that we
1235 can find more clues on assertion failure from debugging messages. */
1236 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1237 gdb_assert (*xfered_len > 0);
1238
1239 return retval;
1240 }
1241
1242 /* Read LEN bytes of target memory at address MEMADDR, placing the
1243 results in GDB's memory at MYADDR. Returns either 0 for success or
1244 -1 if any error occurs.
1245
1246 If an error occurs, no guarantee is made about the contents of the data at
1247 MYADDR. In particular, the caller should not depend upon partial reads
1248 filling the buffer with good data. There is no way for the caller to know
1249 how much good data might have been transfered anyway. Callers that can
1250 deal with partial reads should call target_read (which will retry until
1251 it makes no progress, and then return how much was transferred). */
1252
1253 int
1254 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1255 {
1256 if (target_read (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1257 myaddr, memaddr, len) == len)
1258 return 0;
1259 else
1260 return -1;
1261 }
1262
1263 /* See target/target.h. */
1264
1265 int
1266 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1267 {
1268 gdb_byte buf[4];
1269 int r;
1270
1271 r = target_read_memory (memaddr, buf, sizeof buf);
1272 if (r != 0)
1273 return r;
1274 *result = extract_unsigned_integer (buf, sizeof buf,
1275 gdbarch_byte_order (target_gdbarch ()));
1276 return 0;
1277 }
1278
1279 /* Like target_read_memory, but specify explicitly that this is a read
1280 from the target's raw memory. That is, this read bypasses the
1281 dcache, breakpoint shadowing, etc. */
1282
1283 int
1284 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1285 {
1286 if (target_read (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1287 myaddr, memaddr, len) == len)
1288 return 0;
1289 else
1290 return -1;
1291 }
1292
1293 /* Like target_read_memory, but specify explicitly that this is a read from
1294 the target's stack. This may trigger different cache behavior. */
1295
1296 int
1297 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1298 {
1299 if (target_read (current_top_target (), TARGET_OBJECT_STACK_MEMORY, NULL,
1300 myaddr, memaddr, len) == len)
1301 return 0;
1302 else
1303 return -1;
1304 }
1305
1306 /* Like target_read_memory, but specify explicitly that this is a read from
1307 the target's code. This may trigger different cache behavior. */
1308
1309 int
1310 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1311 {
1312 if (target_read (current_top_target (), TARGET_OBJECT_CODE_MEMORY, NULL,
1313 myaddr, memaddr, len) == len)
1314 return 0;
1315 else
1316 return -1;
1317 }
1318
1319 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1320 Returns either 0 for success or -1 if any error occurs. If an
1321 error occurs, no guarantee is made about how much data got written.
1322 Callers that can deal with partial writes should call
1323 target_write. */
1324
1325 int
1326 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1327 {
1328 if (target_write (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1329 myaddr, memaddr, len) == len)
1330 return 0;
1331 else
1332 return -1;
1333 }
1334
1335 /* Write LEN bytes from MYADDR to target raw memory at address
1336 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1337 If an error occurs, no guarantee is made about how much data got
1338 written. Callers that can deal with partial writes should call
1339 target_write. */
1340
1341 int
1342 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1343 {
1344 if (target_write (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1345 myaddr, memaddr, len) == len)
1346 return 0;
1347 else
1348 return -1;
1349 }
1350
1351 /* Fetch the target's memory map. */
1352
1353 std::vector<mem_region>
1354 target_memory_map (void)
1355 {
1356 std::vector<mem_region> result = current_top_target ()->memory_map ();
1357 if (result.empty ())
1358 return result;
1359
1360 std::sort (result.begin (), result.end ());
1361
1362 /* Check that regions do not overlap. Simultaneously assign
1363 a numbering for the "mem" commands to use to refer to
1364 each region. */
1365 mem_region *last_one = NULL;
1366 for (size_t ix = 0; ix < result.size (); ix++)
1367 {
1368 mem_region *this_one = &result[ix];
1369 this_one->number = ix;
1370
1371 if (last_one != NULL && last_one->hi > this_one->lo)
1372 {
1373 warning (_("Overlapping regions in memory map: ignoring"));
1374 return std::vector<mem_region> ();
1375 }
1376
1377 last_one = this_one;
1378 }
1379
1380 return result;
1381 }
1382
/* Ask the target to erase LENGTH bytes of flash starting at ADDRESS.  */

void
target_flash_erase (ULONGEST address, LONGEST length)
{
  current_top_target ()->flash_erase (address, length);
}
1388
/* Tell the target that all queued flash operations are complete and
   may be committed.  */

void
target_flash_done (void)
{
  current_top_target ()->flash_done ();
}
1394
/* "show trust-readonly-sections" callback: report the current setting
   VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1403
/* Target vector read/write partial wrapper functions.  */

/* Read-only convenience wrapper around target_xfer_partial: supply no
   write buffer.  */

static enum target_xfer_status
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, ULONGEST len,
		     ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
			      xfered_len);
}
1416
/* Write-only convenience wrapper around target_xfer_partial: supply no
   read buffer.  */

static enum target_xfer_status
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
			      xfered_len);
}
1426
1427 /* Wrappers to perform the full transfer. */
1428
1429 /* For docs on target_read see target.h. */
1430
1431 LONGEST
1432 target_read (struct target_ops *ops,
1433 enum target_object object,
1434 const char *annex, gdb_byte *buf,
1435 ULONGEST offset, LONGEST len)
1436 {
1437 LONGEST xfered_total = 0;
1438 int unit_size = 1;
1439
1440 /* If we are reading from a memory object, find the length of an addressable
1441 unit for that architecture. */
1442 if (object == TARGET_OBJECT_MEMORY
1443 || object == TARGET_OBJECT_STACK_MEMORY
1444 || object == TARGET_OBJECT_CODE_MEMORY
1445 || object == TARGET_OBJECT_RAW_MEMORY)
1446 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1447
1448 while (xfered_total < len)
1449 {
1450 ULONGEST xfered_partial;
1451 enum target_xfer_status status;
1452
1453 status = target_read_partial (ops, object, annex,
1454 buf + xfered_total * unit_size,
1455 offset + xfered_total, len - xfered_total,
1456 &xfered_partial);
1457
1458 /* Call an observer, notifying them of the xfer progress? */
1459 if (status == TARGET_XFER_EOF)
1460 return xfered_total;
1461 else if (status == TARGET_XFER_OK)
1462 {
1463 xfered_total += xfered_partial;
1464 QUIT;
1465 }
1466 else
1467 return TARGET_XFER_E_IO;
1468
1469 }
1470 return len;
1471 }
1472
1473 /* Assuming that the entire [begin, end) range of memory cannot be
1474 read, try to read whatever subrange is possible to read.
1475
1476 The function returns, in RESULT, either zero or one memory block.
1477 If there's a readable subrange at the beginning, it is completely
1478 read and returned. Any further readable subrange will not be read.
1479 Otherwise, if there's a readable subrange at the end, it will be
1480 completely read and returned. Any readable subranges before it
1481 (obviously, not starting at the beginning), will be ignored. In
1482 other cases -- either no readable subrange, or readable subrange(s)
1483 that is neither at the beginning, or end, nothing is returned.
1484
1485 The purpose of this function is to handle a read across a boundary
1486 of accessible memory in a case when memory map is not available.
1487 The above restrictions are fine for this case, but will give
1488 incorrect results if the memory is 'patchy'. However, supporting
1489 'patchy' memory would require trying to read every single byte,
1490 and it seems unacceptable solution. Explicit memory map is
1491 recommended for this case -- and target_read_memory_robust will
1492 take care of reading multiple ranges then. */
1493
1494 static void
1495 read_whatever_is_readable (struct target_ops *ops,
1496 const ULONGEST begin, const ULONGEST end,
1497 int unit_size,
1498 std::vector<memory_read_result> *result)
1499 {
1500 ULONGEST current_begin = begin;
1501 ULONGEST current_end = end;
1502 int forward;
1503 ULONGEST xfered_len;
1504
1505 /* If we previously failed to read 1 byte, nothing can be done here. */
1506 if (end - begin <= 1)
1507 return;
1508
1509 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
1510
1511 /* Check that either first or the last byte is readable, and give up
1512 if not. This heuristic is meant to permit reading accessible memory
1513 at the boundary of accessible region. */
1514 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1515 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
1516 {
1517 forward = 1;
1518 ++current_begin;
1519 }
1520 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1521 buf.get () + (end - begin) - 1, end - 1, 1,
1522 &xfered_len) == TARGET_XFER_OK)
1523 {
1524 forward = 0;
1525 --current_end;
1526 }
1527 else
1528 return;
1529
1530 /* Loop invariant is that the [current_begin, current_end) was previously
1531 found to be not readable as a whole.
1532
1533 Note loop condition -- if the range has 1 byte, we can't divide the range
1534 so there's no point trying further. */
1535 while (current_end - current_begin > 1)
1536 {
1537 ULONGEST first_half_begin, first_half_end;
1538 ULONGEST second_half_begin, second_half_end;
1539 LONGEST xfer;
1540 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
1541
1542 if (forward)
1543 {
1544 first_half_begin = current_begin;
1545 first_half_end = middle;
1546 second_half_begin = middle;
1547 second_half_end = current_end;
1548 }
1549 else
1550 {
1551 first_half_begin = middle;
1552 first_half_end = current_end;
1553 second_half_begin = current_begin;
1554 second_half_end = middle;
1555 }
1556
1557 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1558 buf.get () + (first_half_begin - begin) * unit_size,
1559 first_half_begin,
1560 first_half_end - first_half_begin);
1561
1562 if (xfer == first_half_end - first_half_begin)
1563 {
1564 /* This half reads up fine. So, the error must be in the
1565 other half. */
1566 current_begin = second_half_begin;
1567 current_end = second_half_end;
1568 }
1569 else
1570 {
1571 /* This half is not readable. Because we've tried one byte, we
1572 know some part of this half if actually readable. Go to the next
1573 iteration to divide again and try to read.
1574
1575 We don't handle the other half, because this function only tries
1576 to read a single readable subrange. */
1577 current_begin = first_half_begin;
1578 current_end = first_half_end;
1579 }
1580 }
1581
1582 if (forward)
1583 {
1584 /* The [begin, current_begin) range has been read. */
1585 result->emplace_back (begin, current_end, std::move (buf));
1586 }
1587 else
1588 {
1589 /* The [current_end, end) range has been read. */
1590 LONGEST region_len = end - current_end;
1591
1592 gdb::unique_xmalloc_ptr<gdb_byte> data
1593 ((gdb_byte *) xmalloc (region_len * unit_size));
1594 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
1595 region_len * unit_size);
1596 result->emplace_back (current_end, end, std::move (data));
1597 }
1598 }
1599
/* Read LEN addressable units starting at OFFSET, collecting the
   readable chunks into the returned vector; unreadable stretches
   simply produce no entry.  */

std::vector<memory_read_result>
read_memory_robust (struct target_ops *ops,
		    const ULONGEST offset, const LONGEST len)
{
  std::vector<memory_read_result> result;
  int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());

  LONGEST xfered_total = 0;
  while (xfered_total < len)
    {
      struct mem_region *region = lookup_mem_region (offset + xfered_total);
      LONGEST region_len;

      /* If there is no explicit region, a fake one should be created.  */
      gdb_assert (region);

      if (region->hi == 0)
	region_len = len - xfered_total;
      else
	/* NOTE(review): the region was looked up at OFFSET + XFERED_TOTAL,
	   so this looks like it should subtract that same address rather
	   than OFFSET alone; with XFERED_TOTAL > 0 the length is
	   overestimated.  TO_READ below is still capped by
	   LEN - XFERED_TOTAL, so confirm the impact before changing.  */
	region_len = region->hi - offset;

      if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
	{
	  /* Cannot read this region.  Note that we can end up here only
	     if the region is explicitly marked inaccessible, or
	     'inaccessible-by-default' is in effect.  */
	  xfered_total += region_len;
	}
      else
	{
	  LONGEST to_read = std::min (len - xfered_total, region_len);
	  gdb::unique_xmalloc_ptr<gdb_byte> buffer
	    ((gdb_byte *) xmalloc (to_read * unit_size));

	  LONGEST xfered_partial =
	    target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
			 offset + xfered_total, to_read);
	  /* Call an observer, notifying them of the xfer progress?  */
	  if (xfered_partial <= 0)
	    {
	      /* Got an error reading full chunk.  See if maybe we can read
		 some subrange.  */
	      read_whatever_is_readable (ops, offset + xfered_total,
					 offset + xfered_total + to_read,
					 unit_size, &result);
	      xfered_total += to_read;
	    }
	  else
	    {
	      result.emplace_back (offset + xfered_total,
				   offset + xfered_total + xfered_partial,
				   std::move (buffer));
	      xfered_total += xfered_partial;
	    }
	  QUIT;
	}
    }

  return result;
}
1660
1661
1662 /* An alternative to target_write with progress callbacks. */
1663
1664 LONGEST
1665 target_write_with_progress (struct target_ops *ops,
1666 enum target_object object,
1667 const char *annex, const gdb_byte *buf,
1668 ULONGEST offset, LONGEST len,
1669 void (*progress) (ULONGEST, void *), void *baton)
1670 {
1671 LONGEST xfered_total = 0;
1672 int unit_size = 1;
1673
1674 /* If we are writing to a memory object, find the length of an addressable
1675 unit for that architecture. */
1676 if (object == TARGET_OBJECT_MEMORY
1677 || object == TARGET_OBJECT_STACK_MEMORY
1678 || object == TARGET_OBJECT_CODE_MEMORY
1679 || object == TARGET_OBJECT_RAW_MEMORY)
1680 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1681
1682 /* Give the progress callback a chance to set up. */
1683 if (progress)
1684 (*progress) (0, baton);
1685
1686 while (xfered_total < len)
1687 {
1688 ULONGEST xfered_partial;
1689 enum target_xfer_status status;
1690
1691 status = target_write_partial (ops, object, annex,
1692 buf + xfered_total * unit_size,
1693 offset + xfered_total, len - xfered_total,
1694 &xfered_partial);
1695
1696 if (status != TARGET_XFER_OK)
1697 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
1698
1699 if (progress)
1700 (*progress) (xfered_partial, baton);
1701
1702 xfered_total += xfered_partial;
1703 QUIT;
1704 }
1705 return len;
1706 }
1707
/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same machinery as the progress-reporting variant, just with no
     callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
1719
1720 /* Help for target_read_alloc and target_read_stralloc. See their comments
1721 for details. */
1722
1723 template <typename T>
1724 gdb::optional<gdb::def_vector<T>>
1725 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1726 const char *annex)
1727 {
1728 gdb::def_vector<T> buf;
1729 size_t buf_pos = 0;
1730 const int chunk = 4096;
1731
1732 /* This function does not have a length parameter; it reads the
1733 entire OBJECT). Also, it doesn't support objects fetched partly
1734 from one target and partly from another (in a different stratum,
1735 e.g. a core file and an executable). Both reasons make it
1736 unsuitable for reading memory. */
1737 gdb_assert (object != TARGET_OBJECT_MEMORY);
1738
1739 /* Start by reading up to 4K at a time. The target will throttle
1740 this number down if necessary. */
1741 while (1)
1742 {
1743 ULONGEST xfered_len;
1744 enum target_xfer_status status;
1745
1746 buf.resize (buf_pos + chunk);
1747
1748 status = target_read_partial (ops, object, annex,
1749 (gdb_byte *) &buf[buf_pos],
1750 buf_pos, chunk,
1751 &xfered_len);
1752
1753 if (status == TARGET_XFER_EOF)
1754 {
1755 /* Read all there was. */
1756 buf.resize (buf_pos);
1757 return buf;
1758 }
1759 else if (status != TARGET_XFER_OK)
1760 {
1761 /* An error occurred. */
1762 return {};
1763 }
1764
1765 buf_pos += xfered_len;
1766
1767 QUIT;
1768 }
1769 }
1770
/* See target.h.  */

gdb::optional<gdb::byte_vector>
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex)
{
  /* Byte-oriented instantiation of the generic reader.  */
  return target_read_alloc_1<gdb_byte> (ops, object, annex);
}
1779
1780 /* See target.h. */
1781
1782 gdb::optional<gdb::char_vector>
1783 target_read_stralloc (struct target_ops *ops, enum target_object object,
1784 const char *annex)
1785 {
1786 gdb::optional<gdb::char_vector> buf
1787 = target_read_alloc_1<char> (ops, object, annex);
1788
1789 if (!buf)
1790 return {};
1791
1792 if (buf->back () != '\0')
1793 buf->push_back ('\0');
1794
1795 /* Check for embedded NUL bytes; but allow trailing NULs. */
1796 for (auto it = std::find (buf->begin (), buf->end (), '\0');
1797 it != buf->end (); it++)
1798 if (*it != '\0')
1799 {
1800 warning (_("target object %d, annex %s, "
1801 "contained unexpected null characters"),
1802 (int) object, annex ? annex : "(none)");
1803 break;
1804 }
1805
1806 return buf;
1807 }
1808
/* Memory transfer methods.  */

/* Read LEN bytes at ADDR from OPS into BUF, throwing a memory error
   on any failure.  */

void
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
		   LONGEST len)
{
  /* This method is used to read from an alternate, non-current
     target.  This read must bypass the overlay support (as symbols
     don't match this target), and GDB's internal cache (wrong cache
     for this target).  */
  if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
      != len)
    memory_error (TARGET_XFER_E_IO, addr);
}
1823
/* Read a LEN-byte unsigned integer at ADDR from OPS, decoded in
   BYTE_ORDER.  Throws a memory error on read failure.  */

ULONGEST
get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
			    int len, enum bfd_endian byte_order)
{
  gdb_byte buf[sizeof (ULONGEST)];

  /* NOTE(review): LEN is signed; a negative LEN would pass this
     signed/unsigned comparison on some compilers — callers are
     presumably well-behaved, but worth confirming.  */
  gdb_assert (len <= sizeof (buf));
  get_target_memory (ops, addr, buf, len);
  return extract_unsigned_integer (buf, len, byte_order);
}
1834
1835 /* See target.h. */
1836
1837 int
1838 target_insert_breakpoint (struct gdbarch *gdbarch,
1839 struct bp_target_info *bp_tgt)
1840 {
1841 if (!may_insert_breakpoints)
1842 {
1843 warning (_("May not insert breakpoints"));
1844 return 1;
1845 }
1846
1847 return current_top_target ()->insert_breakpoint (gdbarch, bp_tgt);
1848 }
1849
1850 /* See target.h. */
1851
1852 int
1853 target_remove_breakpoint (struct gdbarch *gdbarch,
1854 struct bp_target_info *bp_tgt,
1855 enum remove_bp_reason reason)
1856 {
1857 /* This is kind of a weird case to handle, but the permission might
1858 have been changed after breakpoints were inserted - in which case
1859 we should just take the user literally and assume that any
1860 breakpoints should be left in place. */
1861 if (!may_insert_breakpoints)
1862 {
1863 warning (_("May not remove breakpoints"));
1864 return 1;
1865 }
1866
1867 return current_top_target ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1868 }
1869
/* Implement the "info target"/"info files" command: describe each
   target on the stack that can supply memory.  */

static void
info_target_command (const char *args, int from_tty)
{
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"),
		       objfile_name (symfile_objfile));

  /* Walk from the top of the stack down, so output appears in
     priority order.  */
  for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
    {
      if (!t->has_memory ())
	continue;

      /* Skip the dummy target; it has nothing useful to report.  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* A target above this one claimed all memory, so this one is
	 shadowed while the inferior is running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->longname ());
      t->files_info ();
      has_all_mem = t->has_all_memory ();
    }
}
1894
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  */
  if (!gdbarch_has_global_solist (target_gdbarch ()))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }

  /* attach_flag may be set if the previous process associated with
     the inferior was attached to.  */
  current_inferior ()->attach_flag = 0;

  /* Thread numbering restarts for each new process.  */
  current_inferior ()->highest_thread_num = 0;

  /* The in-process agent may differ between inferiors; re-probe.  */
  agent_capability_invalidate ();
}
1941
1942 /* Callback for iterate_over_inferiors. Gets rid of the given
1943 inferior. */
1944
1945 static int
1946 dispose_inferior (struct inferior *inf, void *args)
1947 {
1948 /* Not all killed inferiors can, or will ever be, removed from the
1949 inferior list. Killed inferiors clearly don't need to be killed
1950 again, so, we're done. */
1951 if (inf->pid == 0)
1952 return 0;
1953
1954 thread_info *thread = any_thread_of_inferior (inf);
1955 if (thread != NULL)
1956 {
1957 switch_to_thread (thread);
1958
1959 /* Core inferiors actually should be detached, not killed. */
1960 if (target_has_execution)
1961 target_kill ();
1962 else
1963 target_detach (inf, 0);
1964 }
1965
1966 return 0;
1967 }
1968
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* Dispose of any live inferiors, querying the user first when the
     session is interactive and something is actually running.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already.  Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum);

  target_pre_inferior (from_tty);
}
1995
/* See target.h.  */

void
target_detach (inferior *inf, int from_tty)
{
  /* As long as some to_detach implementations rely on the current_inferior
     (either directly, or indirectly, like through target_gdbarch or by
     reading memory), INF needs to be the current inferior.  When that
     requirement will become no longer true, then we can remove this
     assertion.  */
  gdb_assert (inf == current_inferior ());

  if (gdbarch_has_global_breakpoints (target_gdbarch ()))
    /* Don't remove global breakpoints here.  They're removed on
       disconnection from the target.  */
    ;
  else
    /* If we're in breakpoints-always-inserted mode, have to remove
       breakpoints before detaching.  */
    remove_breakpoints_inf (current_inferior ());

  /* Let infrun finish any in-flight step-over etc. first.  */
  prepare_for_detach ();

  current_top_target ()->detach (inf, from_tty);
}
2021
/* Disconnect from the current target without resuming it, passing
   ARGS/FROM_TTY through to the target's disconnect method.  */

void
target_disconnect (const char *args, int from_tty)
{
  /* If we're in breakpoints-always-inserted mode or if breakpoints
     are global across processes, we have to remove them before
     disconnecting.  */
  remove_breakpoints ();

  current_top_target ()->disconnect (args, from_tty);
}
2032
/* See target/target.h.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  /* Delegate to the top of the target stack.  */
  return current_top_target ()->wait (ptid, status, options);
}
2040
/* See target.h.  */

ptid_t
default_target_wait (struct target_ops *ops,
		     ptid_t ptid, struct target_waitstatus *status,
		     int options)
{
  /* Default: report nothing happened and no specific thread.  */
  status->kind = TARGET_WAITKIND_IGNORE;
  return minus_one_ptid;
}
2051
/* Return a printable representation of PTID, as produced by the top
   target.  */

const char *
target_pid_to_str (ptid_t ptid)
{
  return current_top_target ()->pid_to_str (ptid);
}
2057
/* Return the target-reported name of thread INFO, or NULL if none.  */

const char *
target_thread_name (struct thread_info *info)
{
  return current_top_target ()->thread_name (info);
}
2063
/* Map a target-side thread handle (THREAD_HANDLE, HANDLE_LEN bytes)
   belonging to inferior INF to GDB's thread_info, or NULL.  */

struct thread_info *
target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
				     int handle_len,
				     struct inferior *inf)
{
  return current_top_target ()->thread_handle_to_thread_info (thread_handle,
							      handle_len, inf);
}
2072
/* Resume PTID (possibly single-STEPping) with SIGNAL, and update
   GDB-side bookkeeping accordingly.  */

void
target_resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  /* Memory contents may change once the inferior runs, so the read
     cache must be dropped.  */
  target_dcache_invalidate ();

  current_top_target ()->resume (ptid, step, signal);

  /* Register contents are likewise stale for the resumed threads.  */
  registers_changed_ptid (ptid);
  /* We only set the internal executing state here.  The user/frontend
     running state is set at a higher level.  This also clears the
     thread's stop_pc as side effect.  */
  set_executing (ptid, 1);
  clear_inline_frame_state (ptid);
}
2087
/* If true, target_commit_resume is a nop: resumptions are queued up
   until the deferring scope (see
   make_scoped_defer_target_commit_resume) ends.  */
static int defer_target_commit_resume;
2090
/* See target.h.  */

void
target_commit_resume (void)
{
  /* Nop while a deferring scope is active; the commit happens when the
     scope ends and this is called again.  */
  if (defer_target_commit_resume)
    return;

  current_top_target ()->commit_resume ();
}
2101
/* See target.h.  */

scoped_restore_tmpl<int>
make_scoped_defer_target_commit_resume ()
{
  /* Set the defer flag; the previous value is restored on scope exit.  */
  return make_scoped_restore (&defer_target_commit_resume, 1);
}
2109
/* Tell the target which of the NUMSIGS signals in PASS_SIGNALS should
   be passed straight to the inferior without stopping.  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  current_top_target ()->pass_signals (numsigs, pass_signals);
}
2115
/* Tell the target which of the NUMSIGS signals in PROGRAM_SIGNALS the
   program itself may receive.  */

void
target_program_signals (int numsigs, unsigned char *program_signals)
{
  current_top_target ()->program_signals (numsigs, program_signals);
}
2121
/* Default follow_fork implementation: reaching it means the fork event
   came from a target with no follow-fork support, which is a bug.  */

static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2130
/* Look through the list of possible targets for a target that can
   follow forks.  */

int
target_follow_fork (int follow_child, int detach_fork)
{
  return current_top_target ()->follow_fork (follow_child, detach_fork);
}
2139
/* Target wrapper for follow exec hook: INF has exec'd into
   EXECD_PATHNAME.  */

void
target_follow_exec (struct inferior *inf, char *execd_pathname)
{
  current_top_target ()->follow_exec (inf, execd_pathname);
}
2147
2148 static void
2149 default_mourn_inferior (struct target_ops *self)
2150 {
2151 internal_error (__FILE__, __LINE__,
2152 _("could not find a target to follow mourn inferior"));
2153 }
2154
/* Mourn the inferior identified by PTID (which must be the current
   inferior) after it has exited or been killed.  */

void
target_mourn_inferior (ptid_t ptid)
{
  /* Callers are expected to mourn only the current inferior.  */
  gdb_assert (ptid == inferior_ptid);
  current_top_target ()->mourn_inferior ();

  /* We no longer need to keep handles on any of the object files.
     Make sure to release them to avoid unnecessarily locking any
     of them while we're not actually debugging.  */
  bfd_cache_close_all ();
}
2166
/* Look for a target which can describe architectural features, starting
   from TARGET.  If we find one, return its description.  */

const struct target_desc *
target_read_description (struct target_ops *target)
{
  /* The method itself delegates down the stack as needed.  */
  return target->read_description ();
}
2175
/* This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search in on the
   target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  unsigned search_buf_size;

  /* The buffer holds one chunk plus pattern_len - 1 bytes of overlap,
     so matches that straddle a chunk boundary are still found.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  gdb::byte_vector search_buf (search_buf_size);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf.data (), start_addr, search_buf_size)
      != search_buf_size)
    {
      warning (_("Unable to access %s bytes of target "
		 "memory at %s, halting search."),
	       pulongest (search_buf_size), hex_string (start_addr));
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes
	= std::min (search_space_len, (ULONGEST) search_buf_size);

      found_ptr = (gdb_byte *) memmem (search_buf.data (), nr_search_bytes,
				       pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf.data ());

	  *found_addrp = found_addr;
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (&search_buf[0], &search_buf[chunk_size], keep_len);

	  nr_to_read = std::min (search_space_len - keep_len,
				 (ULONGEST) chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   &search_buf[keep_len], read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access %s bytes of target "
			 "memory at %s, halting search."),
		       plongest (nr_to_read),
		       hex_string (read_addr));
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  return 0;
}
2276
/* Default implementation of memory-searching.  */

static int
default_search_memory (struct target_ops *self,
		       CORE_ADDR start_addr, ULONGEST search_space_len,
		       const gdb_byte *pattern, ULONGEST pattern_len,
		       CORE_ADDR *found_addrp)
{
  /* Start over from the top of the target stack.  */
  return simple_search_memory (current_top_target (),
			       start_addr, search_space_len,
			       pattern, pattern_len, found_addrp);
}

/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
   sequence of bytes in PATTERN with length PATTERN_LEN.

   The result is 1 if found, 0 if not found, and -1 if there was an error
   requiring halting of the search (e.g. memory read error).
   If the pattern is found the address is recorded in FOUND_ADDRP.  */

int
target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* Delegate to the top of the target stack.  */
  return current_top_target ()->search_memory (start_addr, search_space_len,
					       pattern, pattern_len, found_addrp);
}
2306
2307 /* Look through the currently pushed targets. If none of them will
2308 be able to restart the currently running process, issue an error
2309 message. */
2310
2311 void
2312 target_require_runnable (void)
2313 {
2314 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2315 {
2316 /* If this target knows how to create a new program, then
2317 assume we will still be able to after killing the current
2318 one. Either killing and mourning will not pop T, or else
2319 find_default_run_target will find it again. */
2320 if (t->can_create_inferior ())
2321 return;
2322
2323 /* Do not worry about targets at certain strata that can not
2324 create inferiors. Assume they will be pushed again if
2325 necessary, and continue to the process_stratum. */
2326 if (t->to_stratum > process_stratum)
2327 continue;
2328
2329 error (_("The \"%s\" target does not support \"run\". "
2330 "Try \"help target\" or \"continue\"."),
2331 t->shortname ());
2332 }
2333
2334 /* This function is only called if the target is running. In that
2335 case there should have been a process_stratum target and it
2336 should either know how to create inferiors, or not... */
2337 internal_error (__FILE__, __LINE__, _("No targets found"));
2338 }
2339
/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static int auto_connect_native_target = 1;

/* "show auto-connect-native-target" callback: print the current
   setting VALUE.  */

static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Whether GDB may automatically connect to the "
		      "native target is %s.\n"),
		    value);
}
2353
/* A pointer to the target that can respond to "run" or "attach".
   Native targets are always singletons and instantiated early at GDB
   startup.  */
static target_ops *the_native_target;

/* See target.h.  */

void
set_native_target (target_ops *target)
{
  /* Registering a second native target is a programming error.  */
  if (the_native_target != NULL)
    internal_error (__FILE__, __LINE__,
		    _("native target already set (\"%s\")."),
		    the_native_target->longname ());

  the_native_target = target;
}

/* See target.h.  */

target_ops *
get_native_target ()
{
  return the_native_target;
}
2379
2380 /* Look through the list of possible targets for a target that can
2381 execute a run or attach command without any other data. This is
2382 used to locate the default process stratum.
2383
2384 If DO_MESG is not NULL, the result is always valid (error() is
2385 called for errors); else, return NULL on error. */
2386
2387 static struct target_ops *
2388 find_default_run_target (const char *do_mesg)
2389 {
2390 if (auto_connect_native_target && the_native_target != NULL)
2391 return the_native_target;
2392
2393 if (do_mesg != NULL)
2394 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2395 return NULL;
2396 }
2397
2398 /* See target.h. */
2399
2400 struct target_ops *
2401 find_attach_target (void)
2402 {
2403 /* If a target on the current stack can attach, use it. */
2404 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2405 {
2406 if (t->can_attach ())
2407 return t;
2408 }
2409
2410 /* Otherwise, use the default run target for attaching. */
2411 return find_default_run_target ("attach");
2412 }
2413
2414 /* See target.h. */
2415
2416 struct target_ops *
2417 find_run_target (void)
2418 {
2419 /* If a target on the current stack can run, use it. */
2420 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2421 {
2422 if (t->can_create_inferior ())
2423 return t;
2424 }
2425
2426 /* Otherwise, use the default run target. */
2427 return find_default_run_target ("run");
2428 }
2429
/* Default info_proc implementation: this stratum does not handle the
   request; the caller walks down to the target beneath.  */

bool
target_ops::info_proc (const char *args, enum info_proc_what what)
{
  return false;
}
2435
/* Implement the "info proc" command.  */

int
target_info_proc (const char *args, enum info_proc_what what)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target (NULL);

  /* Walk down the stack until some target handles the request.  */
  for (; t != NULL; t = t->beneath ())
    {
      if (t->info_proc (args, what))
	{
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_info_proc (\"%s\", %d)\n", args, what);

	  return 1;
	}
    }

  /* No target could handle the request.  */
  return 0;
}
2464
/* Default supports_disable_randomization: ask the default run target,
   if there is one.  */

static int
find_default_supports_disable_randomization (struct target_ops *self)
{
  struct target_ops *t;

  t = find_default_run_target (NULL);
  if (t != NULL)
    return t->supports_disable_randomization ();
  return 0;
}

/* Return non-zero if the target stack supports disabling address
   space randomization.  */

int
target_supports_disable_randomization (void)
{
  return current_top_target ()->supports_disable_randomization ();
}

/* See target/target.h.  */

int
target_supports_multi_process (void)
{
  return current_top_target ()->supports_multi_process ();
}
2489
/* See target.h.  */

gdb::optional<gdb::char_vector>
target_get_osdata (const char *type)
{
  struct target_ops *t;

  /* If we're already connected to something that can get us OS
     related data, use it.  Otherwise, try using the native
     target.  */
  t = find_target_at (process_stratum);
  if (t == NULL)
    t = find_default_run_target ("get OS data");

  if (!t)
    return {};

  /* Read the TARGET_OBJECT_OSDATA annex TYPE as a string.  */
  return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
2509
2510
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;

  aspace = current_top_target ()->thread_address_space (ptid);
  /* A target must always report some address space.  */
  gdb_assert (aspace != NULL);

  return aspace;
}
2523
2524 /* See target.h. */
2525
2526 target_ops *
2527 target_ops::beneath () const
2528 {
2529 return g_target_stack.find_beneath (this);
2530 }
2531
2532 void
2533 target_ops::close ()
2534 {
2535 }
2536
2537 bool
2538 target_ops::can_attach ()
2539 {
2540 return 0;
2541 }
2542
2543 void
2544 target_ops::attach (const char *, int)
2545 {
2546 gdb_assert_not_reached ("target_ops::attach called");
2547 }
2548
2549 bool
2550 target_ops::can_create_inferior ()
2551 {
2552 return 0;
2553 }
2554
2555 void
2556 target_ops::create_inferior (const char *, const std::string &,
2557 char **, int)
2558 {
2559 gdb_assert_not_reached ("target_ops::create_inferior called");
2560 }
2561
2562 bool
2563 target_ops::can_run ()
2564 {
2565 return false;
2566 }
2567
2568 int
2569 target_can_run ()
2570 {
2571 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2572 {
2573 if (t->can_run ())
2574 return 1;
2575 }
2576
2577 return 0;
2578 }
2579
/* Target file operations.  */

/* Return the target to use for file I/O requests: the pushed process
   stratum target if there is one, otherwise the default run target.  */

static struct target_ops *
default_fileio_target (void)
{
  struct target_ops *t;

  /* If we're already connected to something that can perform
     file I/O, use it.  Otherwise, try using the native target.  */
  t = find_target_at (process_stratum);
  if (t != NULL)
    return t;
  return find_default_run_target ("file I/O");
}
2594
2595 /* File handle for target file operations. */
2596
2597 struct fileio_fh_t
2598 {
2599 /* The target on which this file is open. NULL if the target is
2600 meanwhile closed while the handle is open. */
2601 target_ops *target;
2602
2603 /* The file descriptor on the target. */
2604 int target_fd;
2605
2606 /* Check whether this fileio_fh_t represents a closed file. */
2607 bool is_closed ()
2608 {
2609 return target_fd < 0;
2610 }
2611 };
2612
/* Vector of currently open file handles.  The value returned by
   target_fileio_open and passed as the FD argument to other
   target_fileio_* functions is an index into this vector.  This
   vector's entries are never freed; instead, files are marked as
   closed, and the handle becomes available for reuse.  */
static std::vector<fileio_fh_t> fileio_fhandles;

/* Index into fileio_fhandles of the lowest handle that might be
   closed.  This permits handle reuse without searching the whole
   list each time a new file is opened.  */
static int lowest_closed_fd;
2624
2625 /* Invalidate the target associated with open handles that were open
2626 on target TARG, since we're about to close (and maybe destroy) the
2627 target. The handles remain open from the client's perspective, but
2628 trying to do anything with them other than closing them will fail
2629 with EIO. */
2630
2631 static void
2632 fileio_handles_invalidate_target (target_ops *targ)
2633 {
2634 for (fileio_fh_t &fh : fileio_fhandles)
2635 if (fh.target == targ)
2636 fh.target = NULL;
2637 }
2638
2639 /* Acquire a target fileio file descriptor. */
2640
2641 static int
2642 acquire_fileio_fd (target_ops *target, int target_fd)
2643 {
2644 /* Search for closed handles to reuse. */
2645 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
2646 {
2647 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
2648
2649 if (fh.is_closed ())
2650 break;
2651 }
2652
2653 /* Push a new handle if no closed handles were found. */
2654 if (lowest_closed_fd == fileio_fhandles.size ())
2655 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
2656 else
2657 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
2658
2659 /* Should no longer be marked closed. */
2660 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
2661
2662 /* Return its index, and start the next lookup at
2663 the next index. */
2664 return lowest_closed_fd++;
2665 }
2666
/* Release a target fileio file descriptor.  */

static void
release_fileio_fd (int fd, fileio_fh_t *fh)
{
  /* Mark the handle closed and remember that FD may be the lowest
     reusable slot.  */
  fh->target_fd = -1;
  lowest_closed_fd = std::min (lowest_closed_fd, fd);
}

/* Return a pointer to the fileio_fhandle_t corresponding to FD.  */

static fileio_fh_t *
fileio_fd_to_fh (int fd)
{
  return &fileio_fhandles[fd];
}
2683
2684
/* Default implementations of file i/o methods.  We don't want these
   to delegate automatically, because we need to know which target
   supported the method, in order to call it directly from within
   pread/pwrite, etc.  Each default reports FILEIO_ENOSYS so the
   caller tries the target beneath.  */

int
target_ops::fileio_open (struct inferior *inf, const char *filename,
			 int flags, int mode, int warn_if_slow,
			 int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
			   ULONGEST offset, int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
			  ULONGEST offset, int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_close (int fd, int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

int
target_ops::fileio_unlink (struct inferior *inf, const char *filename,
			   int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return -1;
}

gdb::optional<std::string>
target_ops::fileio_readlink (struct inferior *inf, const char *filename,
			     int *target_errno)
{
  *target_errno = FILEIO_ENOSYS;
  return {};
}
2744
/* Helper for target_fileio_open and
   target_fileio_open_warn_if_slow.  */

static int
target_fileio_open_1 (struct inferior *inf, const char *filename,
		      int flags, int mode, int warn_if_slow,
		      int *target_errno)
{
  /* Try each target down the stack until one implements the open.  */
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int fd = t->fileio_open (inf, filename, flags, mode,
			       warn_if_slow, target_errno);

      /* ENOSYS means "not implemented at this stratum"; keep going.  */
      if (fd == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      /* Normalize any other failure to -1; on success, record which
	 target owns the descriptor.  */
      if (fd < 0)
	fd = -1;
      else
	fd = acquire_fileio_fd (t, fd);

      if (targetdebug)
	fprintf_unfiltered (gdb_stdlog,
			    "target_fileio_open (%d,%s,0x%x,0%o,%d)"
			    " = %d (%d)\n",
			    inf == NULL ? 0 : inf->num,
			    filename, flags, mode,
			    warn_if_slow, fd,
			    fd != -1 ? 0 : *target_errno);
      return fd;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}
2780
/* See target.h.  */

int
target_fileio_open (struct inferior *inf, const char *filename,
		    int flags, int mode, int *target_errno)
{
  /* Open without the slow-file warning.  */
  return target_fileio_open_1 (inf, filename, flags, mode, 0,
			       target_errno);
}

/* See target.h.  */

int
target_fileio_open_warn_if_slow (struct inferior *inf,
				 const char *filename,
				 int flags, int mode, int *target_errno)
{
  /* Open with the slow-file warning enabled.  */
  return target_fileio_open_1 (inf, filename, flags, mode, 1,
			       target_errno);
}
2801
/* See target.h.  */

int
target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
		      ULONGEST offset, int *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  /* Closed handle: EBADF.  Handle whose target has gone away: EIO.
     Otherwise call the owning target directly.  */
  if (fh->is_closed ())
    *target_errno = EBADF;
  else if (fh->target == NULL)
    *target_errno = EIO;
  else
    ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
				     len, offset, target_errno);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_fileio_pwrite (%d,...,%d,%s) "
			"= %d (%d)\n",
			fd, len, pulongest (offset),
			ret, ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_pread (int fd, gdb_byte *read_buf, int len,
		     ULONGEST offset, int *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  /* Same error mapping as target_fileio_pwrite above.  */
  if (fh->is_closed ())
    *target_errno = EBADF;
  else if (fh->target == NULL)
    *target_errno = EIO;
  else
    ret = fh->target->fileio_pread (fh->target_fd, read_buf,
				    len, offset, target_errno);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_fileio_pread (%d,...,%d,%s) "
			"= %d (%d)\n",
			fd, len, pulongest (offset),
			ret, ret != -1 ? 0 : *target_errno);
  return ret;
}
2853
/* See target.h.  */

int
target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  /* Closed handle: EBADF.  Handle whose target has gone away: EIO.  */
  if (fh->is_closed ())
    *target_errno = EBADF;
  else if (fh->target == NULL)
    *target_errno = EIO;
  else
    ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_fileio_fstat (%d) = %d (%d)\n",
			fd, ret, ret != -1 ? 0 : *target_errno);
  return ret;
}

/* See target.h.  */

int
target_fileio_close (int fd, int *target_errno)
{
  fileio_fh_t *fh = fileio_fd_to_fh (fd);
  int ret = -1;

  if (fh->is_closed ())
    *target_errno = EBADF;
  else
    {
      /* If the owning target disappeared, the handle is closed
	 successfully from the client's perspective.  */
      if (fh->target != NULL)
	ret = fh->target->fileio_close (fh->target_fd,
					target_errno);
      else
	ret = 0;
      release_fileio_fd (fd, fh);
    }

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog,
			"target_fileio_close (%d) = %d (%d)\n",
			fd, ret, ret != -1 ? 0 : *target_errno);
  return ret;
}
2902
/* See target.h.  */

int
target_fileio_unlink (struct inferior *inf, const char *filename,
		      int *target_errno)
{
  /* Try each target down the stack until one implements unlink.  */
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      int ret = t->fileio_unlink (inf, filename, target_errno);

      /* ENOSYS means "not implemented at this stratum"; keep going.  */
      if (ret == -1 && *target_errno == FILEIO_ENOSYS)
	continue;

      if (targetdebug)
	fprintf_unfiltered (gdb_stdlog,
			    "target_fileio_unlink (%d,%s)"
			    " = %d (%d)\n",
			    inf == NULL ? 0 : inf->num, filename,
			    ret, ret != -1 ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return -1;
}

/* See target.h.  */

gdb::optional<std::string>
target_fileio_readlink (struct inferior *inf, const char *filename,
			int *target_errno)
{
  /* Try each target down the stack until one implements readlink.  */
  for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
    {
      gdb::optional<std::string> ret
	= t->fileio_readlink (inf, filename, target_errno);

      if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
	continue;

      if (targetdebug)
	fprintf_unfiltered (gdb_stdlog,
			    "target_fileio_readlink (%d,%s)"
			    " = %s (%d)\n",
			    inf == NULL ? 0 : inf->num,
			    filename, ret ? ret->c_str () : "(nil)",
			    ret ? 0 : *target_errno);
      return ret;
    }

  *target_errno = FILEIO_ENOSYS;
  return {};
}
2956
/* Like scoped_fd, but specific to target fileio.  Closes the wrapped
   descriptor via target_fileio_close on destruction.  */

class scoped_target_fd
{
public:
  /* Take ownership of FD; a negative FD means "no file".  */
  explicit scoped_target_fd (int fd) noexcept
    : m_fd (fd)
  {
  }

  ~scoped_target_fd ()
  {
    if (m_fd >= 0)
      {
	int target_errno;

	/* Best-effort close; the error code is discarded.  */
	target_fileio_close (m_fd, &target_errno);
      }
  }

  DISABLE_COPY_AND_ASSIGN (scoped_target_fd);

  /* Return the wrapped descriptor without releasing ownership.  */
  int get () const noexcept
  {
    return m_fd;
  }

private:
  /* The owned target fileio descriptor, or negative if none.  */
  int m_fd;
};
2987
/* Read target file FILENAME, in the filesystem as seen by INF.  If
   INF is NULL, use the filesystem seen by the debugger (GDB or, for
   remote targets, the remote stub).  Store the result in *BUF_P and
   return the size of the transferred data.  PADDING additional bytes
   are available in *BUF_P.  This is a helper function for
   target_fileio_read_alloc; see the declaration of that function for
   more information.  On error, -1 is returned and *BUF_P is not
   touched; the caller owns *BUF_P (xfree) on success.  */

static LONGEST
target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
			    gdb_byte **buf_p, int padding)
{
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;
  int target_errno;

  scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
					   0700, &target_errno));
  if (fd.get () == -1)
    return -1;

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = (gdb_byte *) xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always leave PADDING bytes of headroom at the end of the
	 buffer for the caller.  */
      n = target_fileio_pread (fd.get (), &buf[buf_pos],
			       buf_alloc - buf_pos - padding, buf_pos,
			       &target_errno);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = (gdb_byte *) xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
3048
/* See target.h.  */

LONGEST
target_fileio_read_alloc (struct inferior *inf, const char *filename,
			  gdb_byte **buf_p)
{
  /* No padding needed for a raw byte read.  */
  return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
}

/* See target.h.  */

gdb::unique_xmalloc_ptr<char>
target_fileio_read_stralloc (struct inferior *inf, const char *filename)
{
  gdb_byte *buffer;
  char *bufstr;
  LONGEST i, transferred;

  /* Request one byte of padding so a NUL terminator can be added.  */
  transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
  bufstr = (char *) buffer;

  if (transferred < 0)
    return gdb::unique_xmalloc_ptr<char> (nullptr);

  if (transferred == 0)
    return gdb::unique_xmalloc_ptr<char> (xstrdup (""));

  bufstr[transferred] = 0;

  /* Check for embedded NUL bytes; but allow trailing NULs.  */
  for (i = strlen (bufstr); i < transferred; i++)
    if (bufstr[i] != 0)
      {
	warning (_("target file %s "
		   "contained unexpected null characters"),
		 filename);
	break;
      }

  return gdb::unique_xmalloc_ptr<char> (bufstr);
}
3090
3091
/* Default region_ok_for_hw_watchpoint: accept regions no wider than a
   pointer on the current architecture.  */

static int
default_region_ok_for_hw_watchpoint (struct target_ops *self,
				     CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
}

/* Default watchpoint_addr_within_range: simple half-open interval
   check [START, START + LENGTH).  */

static int
default_watchpoint_addr_within_range (struct target_ops *target,
				      CORE_ADDR addr,
				      CORE_ADDR start, int length)
{
  return addr >= start && addr < start + length;
}
3106
/* See target.h.  */

target_ops *
target_stack::find_beneath (const target_ops *t) const
{
  /* Look for a non-empty slot at stratum levels beneath T's.  */
  for (int stratum = t->to_stratum - 1; stratum >= 0; --stratum)
    if (m_stack[stratum] != NULL)
      return m_stack[stratum];

  /* T is the bottom-most pushed target.  */
  return NULL;
}

/* See target.h.  */

struct target_ops *
find_target_at (enum strata stratum)
{
  return g_target_stack.at (stratum);
}
3127
3128 \f
3129
3130 /* See target.h */
3131
3132 void
3133 target_announce_detach (int from_tty)
3134 {
3135 pid_t pid;
3136 const char *exec_file;
3137
3138 if (!from_tty)
3139 return;
3140
3141 exec_file = get_exec_file (0);
3142 if (exec_file == NULL)
3143 exec_file = "";
3144
3145 pid = inferior_ptid.pid ();
3146 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3147 target_pid_to_str (ptid_t (pid)));
3148 gdb_flush (gdb_stdout);
3149 }
3150
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  inferior *inf = current_inferior ();

  inferior_ptid = null_ptid;

  /* Mark breakpoints uninserted in case something tries to delete a
     breakpoint while we delete the inferior's threads (which would
     fail, since the inferior is long gone).  */
  mark_breakpoints_out ();

  if (inf->pid != 0)
    exit_inferior (inf);

  /* Note this wipes step-resume breakpoints, so needs to be done
     after exit_inferior, which ends up referencing the step-resume
     breakpoints through clear_thread_inferior_resources.  */
  breakpoint_init_inferior (inf_exited);

  registers_changed ();

  /* Drop any state that referred to the dead process.  */
  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3181 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

const char *
normal_pid_to_str (ptid_t ptid)
{
  /* NOTE: the returned buffer is overwritten by the next call.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid.pid ());
  return buf;
}

/* Default pid_to_str implementation: plain "process N" form.  */

static const char *
default_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3199
/* Error-catcher for target_find_memory_regions.  */
static int
dummy_find_memory_regions (struct target_ops *self,
			   find_memory_region_ftype ignore1, void *ignore2)
{
  /* Reached only if no pushed target implements the method.  */
  error (_("Command not implemented for this target."));
  return 0;
}

/* Error-catcher for target_make_corefile_notes.  */
static char *
dummy_make_corefile_notes (struct target_ops *self,
			   bfd *ignore1, int *ignore2)
{
  /* Reached only if no pushed target implements the method.  */
  error (_("Command not implemented for this target."));
  return NULL;
}
3217
3218 #include "target-delegates.c"
3219
3220
/* Static target_info for the dummy target that sits at the bottom of
   every target stack.  */
static const target_info dummy_target_info = {
  "None",
  N_("None"),
  ""
};

dummy_target::dummy_target ()
{
  to_stratum = dummy_stratum;
}

debug_target::debug_target ()
{
  to_stratum = debug_stratum;
}

const target_info &
dummy_target::info () const
{
  return dummy_target_info;
}

const target_info &
debug_target::info () const
{
  /* The debug target reports the info of the target it wraps.  */
  return beneath ()->info ();
}
3248
3249 \f
3250
/* Close TARG, which must not be on the target stack, after
   invalidating any fileio handles it owned.  */

void
target_close (struct target_ops *targ)
{
  gdb_assert (!target_is_pushed (targ));

  /* Open fileio handles on TARG remain open but will report EIO.  */
  fileio_handles_invalidate_target (targ);

  targ->close ();

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
}
3263
/* Delegate thread_alive for PTID to the top of the target stack.  */

int
target_thread_alive (ptid_t ptid)
{
  return current_top_target ()->thread_alive (ptid);
}

/* Delegate update_thread_list to the top of the target stack.  */

void
target_update_thread_list (void)
{
  current_top_target ()->update_thread_list ();
}
3275
3276 void
3277 target_stop (ptid_t ptid)
3278 {
3279 if (!may_stop)
3280 {
3281 warning (_("May not interrupt or stop the target, ignoring attempt"));
3282 return;
3283 }
3284
3285 current_top_target ()->stop (ptid);
3286 }
3287
3288 void
3289 target_interrupt ()
3290 {
3291 if (!may_stop)
3292 {
3293 warning (_("May not interrupt or stop the target, ignoring attempt"));
3294 return;
3295 }
3296
3297 current_top_target ()->interrupt ();
3298 }
3299
/* See target.h.  */

void
target_pass_ctrlc (void)
{
  current_top_target ()->pass_ctrlc ();
}

/* See target.h.  */

void
default_target_pass_ctrlc (struct target_ops *ops)
{
  /* Default: treat Ctrl-C as a target interrupt request.  */
  target_interrupt ();
}
3315
3316 /* See target/target.h. */
3317
3318 void
3319 target_stop_and_wait (ptid_t ptid)
3320 {
3321 struct target_waitstatus status;
3322 int was_non_stop = non_stop;
3323
3324 non_stop = 1;
3325 target_stop (ptid);
3326
3327 memset (&status, 0, sizeof (status));
3328 target_wait (ptid, &status, 0);
3329
3330 non_stop = was_non_stop;
3331 }
3332
/* See target/target.h.  */

void
target_continue_no_signal (ptid_t ptid)
{
  /* Resume PTID without delivering any signal.  */
  target_resume (ptid, 0, GDB_SIGNAL_0);
}

/* See target/target.h.  */

void
target_continue (ptid_t ptid, enum gdb_signal signal)
{
  /* Resume PTID, delivering SIGNAL.  */
  target_resume (ptid, 0, signal);
}
3348
/* Concatenate ELEM to LIST, a comma-separated list.  */

static void
str_comma_list_concat_elem (std::string *list, const char *elem)
{
  /* The first element needs no separator.  */
  if (list->empty ())
    *list = elem;
  else
    list->append (", ").append (elem);
}
3359
3360 /* Helper for target_options_to_string. If OPT is present in
3361 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3362 OPT is removed from TARGET_OPTIONS. */
3363
3364 static void
3365 do_option (int *target_options, std::string *ret,
3366 int opt, const char *opt_str)
3367 {
3368 if ((*target_options & opt) != 0)
3369 {
3370 str_comma_list_concat_elem (ret, opt_str);
3371 *target_options &= ~opt;
3372 }
3373 }
3374
/* See target.h.  */

std::string
target_options_to_string (int target_options)
{
  std::string ret;

  /* Each DO_TARG_OPTION clears the handled bit from TARGET_OPTIONS.  */
#define DO_TARG_OPTION(OPT) \
  do_option (&target_options, &ret, OPT, #OPT)

  DO_TARG_OPTION (TARGET_WNOHANG);

  /* Any bits left over were not recognized.  */
  if (target_options != 0)
    str_comma_list_concat_elem (&ret, "unknown???");

  return ret;
}
3392
/* Fetch register REGNO (presumably -1 means "all registers" —
   TODO(review): confirm the sentinel against target.h) from the top
   target into REGCACHE, tracing the value when target debugging is
   enabled.  */

void
target_fetch_registers (struct regcache *regcache, int regno)
{
  current_top_target ()->fetch_registers (regcache, regno);
  if (targetdebug)
    regcache->debug_print_register ("target_fetch_registers", regno);
}
3400
/* Write register REGNO from REGCACHE to the target, honoring the
   "set may-write-registers" permission and tracing the value when
   target debugging is enabled.  */

void
target_store_registers (struct regcache *regcache, int regno)
{
  /* Register writes can be globally disabled by the user.  */
  if (!may_write_registers)
    error (_("Writing to registers is not allowed (regno %d)"), regno);

  current_top_target ()->store_registers (regcache, regno);
  if (targetdebug)
    {
      regcache->debug_print_register ("target_store_registers", regno);
    }
}
3413
/* Ask the top target which physical core thread PTID was last seen
   running on.  NOTE(review): the return convention (likely -1 for
   "unknown") is defined by target.h — confirm there.  */

int
target_core_of_thread (ptid_t ptid)
{
  return current_top_target ()->core_of_thread (ptid);
}
3419
3420 int
3421 simple_verify_memory (struct target_ops *ops,
3422 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3423 {
3424 LONGEST total_xfered = 0;
3425
3426 while (total_xfered < size)
3427 {
3428 ULONGEST xfered_len;
3429 enum target_xfer_status status;
3430 gdb_byte buf[1024];
3431 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3432
3433 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3434 buf, NULL, lma + total_xfered, howmuch,
3435 &xfered_len);
3436 if (status == TARGET_XFER_OK
3437 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3438 {
3439 total_xfered += xfered_len;
3440 QUIT;
3441 }
3442 else
3443 return 0;
3444 }
3445 return 1;
3446 }
3447
/* Default implementation of memory verification.  */

static int
default_verify_memory (struct target_ops *self,
		       const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  /* Start over from the top of the target stack.  */
  return simple_verify_memory (current_top_target (),
			       data, memaddr, size);
}

/* Verify that SIZE bytes of target memory at MEMADDR equal DATA,
   delegating to the top target (which falls back to
   default_verify_memory above).  Returns non-zero on a match.  */

int
target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
{
  return current_top_target ()->verify_memory (data, memaddr, size);
}
3464
/* The masked-watchpoint and ranged-breakpoint entry points below are
   thin wrappers that delegate to the current top target.  */

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  return current_top_target ()->insert_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration in
   target.h.  */

int
target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
			       enum target_hw_bp_type rw)
{
  return current_top_target ()->remove_mask_watchpoint (addr, mask, rw);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
{
  return current_top_target ()->masked_watch_num_registers (addr, mask);
}

/* The documentation for this function is in its prototype declaration
   in target.h.  */

int
target_ranged_break_num_registers (void)
{
  return current_top_target ()->ranged_break_num_registers ();
}
3502
/* Branch-trace (btrace) entry points: all delegate to the current
   top target.  */

/* See target.h.  */

struct btrace_target_info *
target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return current_top_target ()->enable_btrace (ptid, conf);
}

/* See target.h.  */

void
target_disable_btrace (struct btrace_target_info *btinfo)
{
  current_top_target ()->disable_btrace (btinfo);
}

/* See target.h.  */

void
target_teardown_btrace (struct btrace_target_info *btinfo)
{
  current_top_target ()->teardown_btrace (btinfo);
}

/* See target.h.  */

enum btrace_error
target_read_btrace (struct btrace_data *btrace,
		    struct btrace_target_info *btinfo,
		    enum btrace_read_type type)
{
  return current_top_target ()->read_btrace (btrace, btinfo, type);
}

/* See target.h.  */

const struct btrace_config *
target_btrace_conf (const struct btrace_target_info *btinfo)
{
  return current_top_target ()->btrace_conf (btinfo);
}
3544
/* Record/replay entry points: all delegate to the current top
   target.  */

/* See target.h.  */

void
target_stop_recording (void)
{
  current_top_target ()->stop_recording ();
}

/* See target.h.  */

void
target_save_record (const char *filename)
{
  current_top_target ()->save_record (filename);
}

/* See target.h.  */

int
target_supports_delete_record ()
{
  return current_top_target ()->supports_delete_record ();
}

/* See target.h.  */

void
target_delete_record (void)
{
  current_top_target ()->delete_record ();
}

/* See target.h.  */

enum record_method
target_record_method (ptid_t ptid)
{
  return current_top_target ()->record_method (ptid);
}

/* See target.h.  */

int
target_record_is_replaying (ptid_t ptid)
{
  return current_top_target ()->record_is_replaying (ptid);
}

/* See target.h.  */

int
target_record_will_replay (ptid_t ptid, int dir)
{
  return current_top_target ()->record_will_replay (ptid, dir);
}

/* See target.h.  */

void
target_record_stop_replaying (void)
{
  current_top_target ()->record_stop_replaying ();
}

/* See target.h.  */

void
target_goto_record_begin (void)
{
  current_top_target ()->goto_record_begin ();
}

/* See target.h.  */

void
target_goto_record_end (void)
{
  current_top_target ()->goto_record_end ();
}

/* See target.h.  */

void
target_goto_record (ULONGEST insn)
{
  current_top_target ()->goto_record (insn);
}
3632
/* Instruction- and call-history entry points: all delegate to the
   current top target.  */

/* See target.h.  */

void
target_insn_history (int size, gdb_disassembly_flags flags)
{
  current_top_target ()->insn_history (size, flags);
}

/* See target.h.  */

void
target_insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags)
{
  current_top_target ()->insn_history_from (from, size, flags);
}

/* See target.h.  */

void
target_insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags)
{
  current_top_target ()->insn_history_range (begin, end, flags);
}

/* See target.h.  */

void
target_call_history (int size, record_print_flags flags)
{
  current_top_target ()->call_history (size, flags);
}

/* See target.h.  */

void
target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
{
  current_top_target ()->call_history_from (begin, size, flags);
}

/* See target.h.  */

void
target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
{
  current_top_target ()->call_history_range (begin, end, flags);
}
3682
/* Frame-unwinder and core-generation hooks: all delegate to the
   current top target.  */

/* See target.h.  */

const struct frame_unwind *
target_get_unwinder (void)
{
  return current_top_target ()->get_unwinder ();
}

/* See target.h.  */

const struct frame_unwind *
target_get_tailcall_unwinder (void)
{
  return current_top_target ()->get_tailcall_unwinder ();
}

/* See target.h.  */

void
target_prepare_to_generate_core (void)
{
  current_top_target ()->prepare_to_generate_core ();
}

/* See target.h.  */

void
target_done_generating_core (void)
{
  current_top_target ()->done_generating_core ();
}
3714
3715 \f
3716
/* Help text for the "info target" and "info files" commands
   registered in initialize_targets.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
3721
/* Default target_ops::rcmd implementation: the target has no remote
   monitor, so any "monitor" command is an error.  */

static void
default_rcmd (struct target_ops *self, const char *command,
	      struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}

/* Implementation of the "monitor" CLI command: forward CMD to the
   target, sending its output to gdb_stdtarg.  */

static void
do_monitor_command (const char *cmd, int from_tty)
{
  target_rcmd (cmd, gdb_stdtarg);
}
3734
/* Erases all the memory regions marked as flash.  CMD and FROM_TTY are
   ignored.  */

void
flash_erase_command (const char *cmd, int from_tty)
{
  /* Used to communicate termination of flash operations to the target.  */
  bool found_flash_region = false;
  struct gdbarch *gdbarch = target_gdbarch ();

  std::vector<mem_region> mem_regions = target_memory_map ();

  /* Iterate over all memory regions.  */
  for (const mem_region &m : mem_regions)
    {
      /* Is this a flash memory region?  */
      if (m.attrib.mode == MEM_FLASH)
	{
	  found_flash_region = true;
	  target_flash_erase (m.lo, m.hi - m.lo);

	  /* Report the erased region to the UI.  */
	  ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");

	  current_uiout->message (_("Erasing flash memory region at address "));
	  current_uiout->field_fmt ("address", "%s", paddress (gdbarch, m.lo));
	  current_uiout->message (", size = ");
	  current_uiout->field_fmt ("size", "%s", hex_string (m.hi - m.lo));
	  current_uiout->message ("\n");
	}
    }

  /* Did we do any flash operations?  If so, we need to finalize them.  */
  if (found_flash_region)
    target_flash_done ();
  else
    current_uiout->message (_("No flash memory regions found.\n"));
}
3772
/* Print the name of each layers of our target stack.  */

static void
maintenance_print_target_stack (const char *cmd, int from_tty)
{
  printf_filtered (_("The current target stack is:\n"));

  for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
    {
      /* The debug wrapper target is an implementation detail; don't
	 show it to the user.  */
      if (t->to_stratum == debug_stratum)
	continue;
      printf_filtered ("  - %s (%s)\n", t->shortname (), t->longname ());
    }
}
3787
/* See target.h.  */

void
target_async (int enable)
{
  /* Keep infrun's async event handling in sync with the target's
     async mode.  */
  infrun_async (enable);
  current_top_target ()->async (enable);
}

/* See target.h.  */

void
target_thread_events (int enable)
{
  current_top_target ()->thread_events (enable);
}
3804
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
int target_async_permitted = 1;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 1;

/* Implementation of "maint set target-async".  Rejects the change
   while there are live inferiors, reverting the staged value.  */

static void
maint_set_target_async_command (const char *args, int from_tty,
				struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staged copy before erroring.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}

/* Implementation of "maint show target-async".  */

static void
maint_show_target_async_command (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
3835
/* Return true if the target operates in non-stop mode even with "set
   non-stop off".  */

static int
target_always_non_stop_p (void)
{
  return current_top_target ()->always_non_stop_p ();
}
3844
3845 /* See target.h. */
3846
3847 int
3848 target_is_non_stop_p (void)
3849 {
3850 return (non_stop
3851 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
3852 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
3853 && target_always_non_stop_p ()));
3854 }
3855
/* Controls if targets can report that they always run in non-stop
   mode.  This is just for maintainers to use when debugging gdb.  */
enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;

/* The set command writes to this variable.  If the inferior is
   executing, target_non_stop_enabled is *not* updated.  */
static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;

/* Implementation of "maint set target-non-stop".  */

static void
maint_set_target_non_stop_command (const char *args, int from_tty,
				   struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user's edit of the staged copy before erroring.  */
      target_non_stop_enabled_1 = target_non_stop_enabled;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_non_stop_enabled = target_non_stop_enabled_1;
}

/* Implementation of "maint show target-non-stop".  */

static void
maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
				    struct cmd_list_element *c,
				    const char *value)
{
  /* In "auto" mode, also show what the target actually reports.  */
  if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
    fprintf_filtered (file,
		      _("Whether the target is always in non-stop mode "
			"is %s (currently %s).\n"), value,
		      target_always_non_stop_p () ? "on" : "off");
  else
    fprintf_filtered (file,
		      _("Whether the target is always in non-stop mode "
			"is %s.\n"), value);
}
3896
/* Temporary copies of permission settings.  The "set" commands write
   to these; they are committed to the real variables only when the
   change is allowed.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;

/* Make the user-set values match the real values again.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
3918
/* The one function handles (most of) the permission flags in the same
   way.  */

static void
set_target_permissions (const char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged copies and refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  Note that
     may_write_memory is deliberately not handled here; it has its own
     setter (set_write_memory_permission) which is allowed even with a
     running inferior.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}

/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (const char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
3951
/* Module initialization: set up the initial target stack and register
   all target-related commands and settings.  */

void
initialize_targets (void)
{
  /* The dummy target sits at the bottom of the stack from the very
     start, so the stack is never empty.  */
  the_dummy_target = new dummy_target ();
  push_target (the_dummy_target);

  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled. Higher numbers are more\n\
verbose."),
			     set_targetdebug,
			     show_targetdebug,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target. This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   maint_set_target_async_command,
			   maint_show_target_async_command,
			   &maintenance_set_cmdlist,
			   &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
				&target_non_stop_enabled_1, _("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
				maint_set_target_non_stop_command,
				maint_show_target_non_stop_command,
				&maintenance_set_cmdlist,
				&maintenance_show_cmdlist);

  /* The "may-*" permission settings below stage their values in the
     *_1 copies; the set hooks commit them when allowed.  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
	   _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
			   &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
			   NULL, show_auto_connect_native_target,
			   &setlist, &showlist);
}