Add "info connections" command, "info inferiors" connection number/string
[deliverable/binutils-gdb.git] / gdb / target.c
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2020 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "terminal.h"
51 #include <unordered_map>
52 #include "target-connection.h"
53
54 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
55
56 static void default_terminal_info (struct target_ops *, const char *, int);
57
58 static int default_watchpoint_addr_within_range (struct target_ops *,
59 CORE_ADDR, CORE_ADDR, int);
60
61 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
62 CORE_ADDR, int);
63
64 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
65
66 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
67 long lwp, long tid);
68
69 static int default_follow_fork (struct target_ops *self, int follow_child,
70 int detach_fork);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static void tcomplain (void) ATTRIBUTE_NORETURN;
86
87 static struct target_ops *find_default_run_target (const char *);
88
89 static int dummy_find_memory_regions (struct target_ops *self,
90 find_memory_region_ftype ignore1,
91 void *ignore2);
92
93 static char *dummy_make_corefile_notes (struct target_ops *self,
94 bfd *ignore1, int *ignore2);
95
96 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 /* Mapping between target_info objects (which have address identity)
102 and corresponding open/factory function/callback. Each add_target
103 call adds one entry to this map, and registers a "target
104 TARGET_NAME" command that when invoked calls the factory registered
105 here. The target_info object is associated with the command via
106 the command's context. */
107 static std::unordered_map<const target_info *, target_open_ftype *>
108 target_factories;
109
110 /* The singleton debug target. */
111
112 static struct target_ops *the_debug_target;
113
114 /* Top of target stack. */
115 /* The target structure we are currently using to talk to a process
116 or file or whatever "inferior" we have. */
117
118 target_ops *
119 current_top_target ()
120 {
121 return current_inferior ()->top_target ();
122 }
123
124 /* Command list for target. */
125
126 static struct cmd_list_element *targetlist = NULL;
127
128 /* True if we should trust readonly sections from the
129 executable when reading memory. */
130
131 static bool trust_readonly = false;
132
133 /* Nonzero if we should show true memory content including
134 memory breakpoints inserted by GDB. */
135
136 static int show_memory_breakpoints = 0;
137
138 /* These globals control whether GDB attempts to perform these
139 operations; they are useful for targets that need to prevent
140 inadvertent disruption, such as in non-stop mode. */
141
142 bool may_write_registers = true;
143
144 bool may_write_memory = true;
145
146 bool may_insert_breakpoints = true;
147
148 bool may_insert_tracepoints = true;
149
150 bool may_insert_fast_tracepoints = true;
151
152 bool may_stop = true;
153
154 /* Non-zero if we want to see tracing of target-level operations. */
155
156 static unsigned int targetdebug = 0;
157
158 static void
159 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
160 {
161 if (targetdebug)
162 push_target (the_debug_target);
163 else
164 unpush_target (the_debug_target);
165 }
166
167 static void
168 show_targetdebug (struct ui_file *file, int from_tty,
169 struct cmd_list_element *c, const char *value)
170 {
171 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
172 }
173
174 /* The user just typed 'target' without the name of a target. */
175
176 static void
177 target_command (const char *arg, int from_tty)
178 {
179 fputs_filtered ("Argument required (target name). Try `help target'\n",
180 gdb_stdout);
181 }
182
183 int
184 target_has_all_memory_1 (void)
185 {
186 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
187 if (t->has_all_memory ())
188 return 1;
189
190 return 0;
191 }
192
193 int
194 target_has_memory_1 (void)
195 {
196 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
197 if (t->has_memory ())
198 return 1;
199
200 return 0;
201 }
202
203 int
204 target_has_stack_1 (void)
205 {
206 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
207 if (t->has_stack ())
208 return 1;
209
210 return 0;
211 }
212
213 int
214 target_has_registers_1 (void)
215 {
216 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
217 if (t->has_registers ())
218 return 1;
219
220 return 0;
221 }
222
223 bool
224 target_has_execution_1 (inferior *inf)
225 {
226 for (target_ops *t = inf->top_target ();
227 t != nullptr;
228 t = inf->find_target_beneath (t))
229 if (t->has_execution (inf))
230 return true;
231
232 return false;
233 }
234
235 int
236 target_has_execution_current (void)
237 {
238 return target_has_execution_1 (current_inferior ());
239 }
240
241 /* This is used to implement the various target commands. */
242
243 static void
244 open_target (const char *args, int from_tty, struct cmd_list_element *command)
245 {
246 auto *ti = static_cast<target_info *> (get_cmd_context (command));
247 target_open_ftype *func = target_factories[ti];
248
249 if (targetdebug)
250 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
251 ti->shortname);
252
253 func (args, from_tty);
254
255 if (targetdebug)
256 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
257 ti->shortname, args, from_tty);
258 }
259
260 /* See target.h. */
261
262 void
263 add_target (const target_info &t, target_open_ftype *func,
264 completer_ftype *completer)
265 {
266 struct cmd_list_element *c;
267
268 auto &func_slot = target_factories[&t];
269 if (func_slot != nullptr)
270 internal_error (__FILE__, __LINE__,
271 _("target already added (\"%s\")."), t.shortname);
272 func_slot = func;
273
274 if (targetlist == NULL)
275 add_prefix_cmd ("target", class_run, target_command, _("\
276 Connect to a target machine or process.\n\
277 The first argument is the type or protocol of the target machine.\n\
278 Remaining arguments are interpreted by the target protocol. For more\n\
279 information on the arguments for a particular protocol, type\n\
280 `help target ' followed by the protocol name."),
281 &targetlist, "target ", 0, &cmdlist);
282 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
283 set_cmd_context (c, (void *) &t);
284 set_cmd_sfunc (c, open_target);
285 if (completer != NULL)
286 set_cmd_completer (c, completer);
287 }
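/* Illustrative sketch, not part of the original file: a backend typically
   registers itself from its _initialize_* routine, roughly as below.  The
   "widget" target_info, open routine, and widget_target class are
   hypothetical; the pattern mirrors how existing targets call add_target.

     static const target_info widget_target_info = {
       "widget",
       N_("Remote widget board"),
       N_("Use a widget board over a serial line or TCP connection.")
     };

     static void
     widget_target_open (const char *args, int from_tty)
     {
       target_preopen (from_tty);
       push_target (new widget_target ());
     }

     void
     _initialize_widget ()
     {
       add_target (widget_target_info, widget_target_open,
                   filename_completer);
     }

   Invoking "target widget ARGS" then reaches widget_target_open through
   open_target above.  */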
288
289 /* See target.h. */
290
291 void
292 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
293 {
294 struct cmd_list_element *c;
295 char *alt;
296
297 /* If we use add_alias_cmd here, we do not get the deprecated warning;
298 see PR cli/15104. */
299 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
300 set_cmd_sfunc (c, open_target);
301 set_cmd_context (c, (void *) &tinfo);
302 alt = xstrprintf ("target %s", tinfo.shortname);
303 deprecate_cmd (c, alt);
304 }
305
306 /* Stub functions */
307
308 void
309 target_kill (void)
310 {
311 current_top_target ()->kill ();
312 }
313
314 void
315 target_load (const char *arg, int from_tty)
316 {
317 target_dcache_invalidate ();
318 current_top_target ()->load (arg, from_tty);
319 }
320
321 /* Define it. */
322
323 target_terminal_state target_terminal::m_terminal_state
324 = target_terminal_state::is_ours;
325
326 /* See target/target.h. */
327
328 void
329 target_terminal::init (void)
330 {
331 current_top_target ()->terminal_init ();
332
333 m_terminal_state = target_terminal_state::is_ours;
334 }
335
336 /* See target/target.h. */
337
338 void
339 target_terminal::inferior (void)
340 {
341 struct ui *ui = current_ui;
342
343 /* A background resume (``run&'') should leave GDB in control of the
344 terminal. */
345 if (ui->prompt_state != PROMPT_BLOCKED)
346 return;
347
348 /* Since we always run the inferior in the main console (unless "set
349 inferior-tty" is in effect), when some UI other than the main one
350 calls target_terminal::inferior, we leave the main UI's
351 terminal settings as is. */
352 if (ui != main_ui)
353 return;
354
355 /* If GDB is resuming the inferior in the foreground, install
356 inferior's terminal modes. */
357
358 struct inferior *inf = current_inferior ();
359
360 if (inf->terminal_state != target_terminal_state::is_inferior)
361 {
362 current_top_target ()->terminal_inferior ();
363 inf->terminal_state = target_terminal_state::is_inferior;
364 }
365
366 m_terminal_state = target_terminal_state::is_inferior;
367
368 /* If the user hit C-c before, pretend that it was hit right
369 here. */
370 if (check_quit_flag ())
371 target_pass_ctrlc ();
372 }
373
374 /* See target/target.h. */
375
376 void
377 target_terminal::restore_inferior (void)
378 {
379 struct ui *ui = current_ui;
380
381 /* See target_terminal::inferior(). */
382 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
383 return;
384
385 /* Restore the terminal settings of inferiors that were in the
386 foreground but are now ours_for_output due to a temporary
387 target_target::ours_for_output() call. */
388
389 {
390 scoped_restore_current_inferior restore_inferior;
391
392 for (::inferior *inf : all_inferiors ())
393 {
394 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
395 {
396 set_current_inferior (inf);
397 current_top_target ()->terminal_inferior ();
398 inf->terminal_state = target_terminal_state::is_inferior;
399 }
400 }
401 }
402
403 m_terminal_state = target_terminal_state::is_inferior;
404
405 /* If the user hit C-c before, pretend that it was hit right
406 here. */
407 if (check_quit_flag ())
408 target_pass_ctrlc ();
409 }
410
411 /* Switch terminal state to DESIRED_STATE, either is_ours, or
412 is_ours_for_output. */
413
414 static void
415 target_terminal_is_ours_kind (target_terminal_state desired_state)
416 {
417 scoped_restore_current_inferior restore_inferior;
418
419 /* Must do this in two passes. First, have all inferiors save the
420 current terminal settings. Then, after all inferiors have had a
421 chance to safely save the terminal settings, restore GDB's
422 terminal settings. */
423
424 for (inferior *inf : all_inferiors ())
425 {
426 if (inf->terminal_state == target_terminal_state::is_inferior)
427 {
428 set_current_inferior (inf);
429 current_top_target ()->terminal_save_inferior ();
430 }
431 }
432
433 for (inferior *inf : all_inferiors ())
434 {
435 /* Note we don't check is_inferior here like above because we
436 need to handle 'is_ours_for_output -> is_ours' too. Careful
437 to never transition from 'is_ours' to 'is_ours_for_output',
438 though. */
439 if (inf->terminal_state != target_terminal_state::is_ours
440 && inf->terminal_state != desired_state)
441 {
442 set_current_inferior (inf);
443 if (desired_state == target_terminal_state::is_ours)
444 current_top_target ()->terminal_ours ();
445 else if (desired_state == target_terminal_state::is_ours_for_output)
446 current_top_target ()->terminal_ours_for_output ();
447 else
448 gdb_assert_not_reached ("unhandled desired state");
449 inf->terminal_state = desired_state;
450 }
451 }
452 }
453
454 /* See target/target.h. */
455
456 void
457 target_terminal::ours ()
458 {
459 struct ui *ui = current_ui;
460
461 /* See target_terminal::inferior. */
462 if (ui != main_ui)
463 return;
464
465 if (m_terminal_state == target_terminal_state::is_ours)
466 return;
467
468 target_terminal_is_ours_kind (target_terminal_state::is_ours);
469 m_terminal_state = target_terminal_state::is_ours;
470 }
471
472 /* See target/target.h. */
473
474 void
475 target_terminal::ours_for_output ()
476 {
477 struct ui *ui = current_ui;
478
479 /* See target_terminal::inferior. */
480 if (ui != main_ui)
481 return;
482
483 if (!target_terminal::is_inferior ())
484 return;
485
486 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
487 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
488 }
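/* Illustrative usage sketch, not part of the original file: code that
   needs to print while the inferior owns the terminal usually grabs the
   terminal for output only and relies on a scoped restore to put the
   previous state back, along the lines of:

     {
       target_terminal::scoped_restore_terminal_state term_state;
       target_terminal::ours_for_output ();
       printf_unfiltered (_("inferior still running...\n"));
     }

   When term_state goes out of scope the prior terminal state is
   reinstalled, even if the printing code throws.  */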
489
490 /* See target/target.h. */
491
492 void
493 target_terminal::info (const char *arg, int from_tty)
494 {
495 current_top_target ()->terminal_info (arg, from_tty);
496 }
497
498 /* See target.h. */
499
500 bool
501 target_supports_terminal_ours (void)
502 {
503 /* The current top target is the target at the top of the target
504 stack of the current inferior. While normally there's always an
505 inferior, we must check for nullptr here because we can get here
506 very early during startup, before the initial inferior is first
507 created. */
508 inferior *inf = current_inferior ();
509
510 if (inf == nullptr)
511 return false;
512 return inf->top_target ()->supports_terminal_ours ();
513 }
514
515 static void
516 tcomplain (void)
517 {
518 error (_("You can't do that when your target is `%s'"),
519 current_top_target ()->shortname ());
520 }
521
522 void
523 noprocess (void)
524 {
525 error (_("You can't do that without a process to debug."));
526 }
527
528 static void
529 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
530 {
531 printf_unfiltered (_("No saved terminal information.\n"));
532 }
533
534 /* A default implementation for the to_get_ada_task_ptid target method.
535
536 This function builds the PTID by using both LWP and TID as part of
537 the PTID lwp and tid elements. The pid used is the pid of the
538 inferior_ptid. */
539
540 static ptid_t
541 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
542 {
543 return ptid_t (inferior_ptid.pid (), lwp, tid);
544 }
545
546 static enum exec_direction_kind
547 default_execution_direction (struct target_ops *self)
548 {
549 if (!target_can_execute_reverse)
550 return EXEC_FORWARD;
551 else if (!target_can_async_p ())
552 return EXEC_FORWARD;
553 else
554 gdb_assert_not_reached ("\
555 to_execution_direction must be implemented for reverse async");
556 }
557
558 /* See target.h. */
559
560 void
561 decref_target (target_ops *t)
562 {
563 t->decref ();
564 if (t->refcount () == 0)
565 {
566 if (t->stratum () == process_stratum)
567 connection_list_remove (as_process_stratum_target (t));
568 target_close (t);
569 }
570 }
571
572 /* See target.h. */
573
574 void
575 target_stack::push (target_ops *t)
576 {
577 t->incref ();
578
579 strata stratum = t->stratum ();
580
581 if (stratum == process_stratum)
582 connection_list_add (as_process_stratum_target (t));
583
584 /* If there's already a target at this stratum, remove it. */
585
586 if (m_stack[stratum] != NULL)
587 unpush (m_stack[stratum]);
588
589 /* Now add the new one. */
590 m_stack[stratum] = t;
591
592 if (m_top < stratum)
593 m_top = stratum;
594 }
595
596 /* See target.h. */
597
598 void
599 push_target (struct target_ops *t)
600 {
601 current_inferior ()->push_target (t);
602 }
603
604 /* See target.h. */
605
606 void
607 push_target (target_ops_up &&t)
608 {
609 current_inferior ()->push_target (t.get ());
610 t.release ();
611 }
612
613 /* See target.h. */
614
615 int
616 unpush_target (struct target_ops *t)
617 {
618 return current_inferior ()->unpush_target (t);
619 }
620
621 /* See target.h. */
622
623 bool
624 target_stack::unpush (target_ops *t)
625 {
626 gdb_assert (t != NULL);
627
628 strata stratum = t->stratum ();
629
630 if (stratum == dummy_stratum)
631 internal_error (__FILE__, __LINE__,
632 _("Attempt to unpush the dummy target"));
633
634 /* Look for the specified target. Note that a target can only occur
635 once in the target stack. */
636
637 if (m_stack[stratum] != t)
638 {
639 /* If T wasn't pushed, quit. Only open targets should be
640 closed. */
641 return false;
642 }
643
644 /* Unchain the target. */
645 m_stack[stratum] = NULL;
646
647 if (m_top == stratum)
648 m_top = t->beneath ()->stratum ();
649
650 /* Finally close the target, if there are no inferiors
651 referencing this target still. Note we do this after unchaining,
652 so any target method calls from within the target_close
653 implementation don't end up in T anymore. Do leave the target
654 open if other inferiors are still referencing this
655 target. */
656 decref_target (t);
657
658 return true;
659 }
660
661 /* Unpush TARGET and assert that it worked. */
662
663 static void
664 unpush_target_and_assert (struct target_ops *target)
665 {
666 if (!unpush_target (target))
667 {
668 fprintf_unfiltered (gdb_stderr,
669 "pop_all_targets couldn't find target %s\n",
670 target->shortname ());
671 internal_error (__FILE__, __LINE__,
672 _("failed internal consistency check"));
673 }
674 }
675
676 void
677 pop_all_targets_above (enum strata above_stratum)
678 {
679 while ((int) (current_top_target ()->stratum ()) > (int) above_stratum)
680 unpush_target_and_assert (current_top_target ());
681 }
682
683 /* See target.h. */
684
685 void
686 pop_all_targets_at_and_above (enum strata stratum)
687 {
688 while ((int) (current_top_target ()->stratum ()) >= (int) stratum)
689 unpush_target_and_assert (current_top_target ());
690 }
691
692 void
693 pop_all_targets (void)
694 {
695 pop_all_targets_above (dummy_stratum);
696 }
697
698 /* Return true if T is now pushed in the current inferior's target
699 stack. Return false otherwise. */
700
701 bool
702 target_is_pushed (target_ops *t)
703 {
704 return current_inferior ()->target_is_pushed (t);
705 }
706
707 /* Default implementation of to_get_thread_local_address. */
708
709 static void
710 generic_tls_error (void)
711 {
712 throw_error (TLS_GENERIC_ERROR,
713 _("Cannot find thread-local variables on this target"));
714 }
715
716 /* Using the objfile specified in OBJFILE, find the address for the
717 current thread's thread-local storage with offset OFFSET. */
718 CORE_ADDR
719 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
720 {
721 volatile CORE_ADDR addr = 0;
722 struct target_ops *target = current_top_target ();
723 struct gdbarch *gdbarch = target_gdbarch ();
724
725 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
726 {
727 ptid_t ptid = inferior_ptid;
728
729 try
730 {
731 CORE_ADDR lm_addr;
732
733 /* Fetch the load module address for this objfile. */
734 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
735 objfile);
736
737 if (gdbarch_get_thread_local_address_p (gdbarch))
738 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
739 offset);
740 else
741 addr = target->get_thread_local_address (ptid, lm_addr, offset);
742 }
743 /* If an error occurred, print TLS related messages here. Otherwise,
744 throw the error to some higher catcher. */
745 catch (const gdb_exception &ex)
746 {
747 int objfile_is_library = (objfile->flags & OBJF_SHARED);
748
749 switch (ex.error)
750 {
751 case TLS_NO_LIBRARY_SUPPORT_ERROR:
752 error (_("Cannot find thread-local variables "
753 "in this thread library."));
754 break;
755 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
756 if (objfile_is_library)
757 error (_("Cannot find shared library `%s' in dynamic"
758 " linker's load module list"), objfile_name (objfile));
759 else
760 error (_("Cannot find executable file `%s' in dynamic"
761 " linker's load module list"), objfile_name (objfile));
762 break;
763 case TLS_NOT_ALLOCATED_YET_ERROR:
764 if (objfile_is_library)
765 error (_("The inferior has not yet allocated storage for"
766 " thread-local variables in\n"
767 "the shared library `%s'\n"
768 "for %s"),
769 objfile_name (objfile),
770 target_pid_to_str (ptid).c_str ());
771 else
772 error (_("The inferior has not yet allocated storage for"
773 " thread-local variables in\n"
774 "the executable `%s'\n"
775 "for %s"),
776 objfile_name (objfile),
777 target_pid_to_str (ptid).c_str ());
778 break;
779 case TLS_GENERIC_ERROR:
780 if (objfile_is_library)
781 error (_("Cannot find thread-local storage for %s, "
782 "shared library %s:\n%s"),
783 target_pid_to_str (ptid).c_str (),
784 objfile_name (objfile), ex.what ());
785 else
786 error (_("Cannot find thread-local storage for %s, "
787 "executable file %s:\n%s"),
788 target_pid_to_str (ptid).c_str (),
789 objfile_name (objfile), ex.what ());
790 break;
791 default:
792 throw;
793 break;
794 }
795 }
796 }
797 else
798 error (_("Cannot find thread-local variables on this target"));
799
800 return addr;
801 }
802
803 const char *
804 target_xfer_status_to_string (enum target_xfer_status status)
805 {
806 #define CASE(X) case X: return #X
807 switch (status)
808 {
809 CASE(TARGET_XFER_E_IO);
810 CASE(TARGET_XFER_UNAVAILABLE);
811 default:
812 return "<unknown>";
813 }
814 #undef CASE
815 };
816
817
818 #undef MIN
819 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
820
821 /* target_read_string -- read a null-terminated string, up to LEN bytes,
822 from MEMADDR in the target. Set *ERRNOP to the errno code, or 0 if successful.
823 Set *STRING to newly allocated memory containing the data; ownership is
824 transferred to the caller through the unique pointer. Return the number
825 of bytes successfully read. */
826
827 int
828 target_read_string (CORE_ADDR memaddr, gdb::unique_xmalloc_ptr<char> *string,
829 int len, int *errnop)
830 {
831 int tlen, offset, i;
832 gdb_byte buf[4];
833 int errcode = 0;
834 char *buffer;
835 int buffer_allocated;
836 char *bufptr;
837 unsigned int nbytes_read = 0;
838
839 gdb_assert (string);
840
841 /* Small for testing. */
842 buffer_allocated = 4;
843 buffer = (char *) xmalloc (buffer_allocated);
844 bufptr = buffer;
845
846 while (len > 0)
847 {
848 tlen = MIN (len, 4 - (memaddr & 3));
849 offset = memaddr & 3;
850
851 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
852 if (errcode != 0)
853 {
854 /* The transfer request might have crossed the boundary to an
855 unallocated region of memory. Retry the transfer, requesting
856 a single byte. */
857 tlen = 1;
858 offset = 0;
859 errcode = target_read_memory (memaddr, buf, 1);
860 if (errcode != 0)
861 goto done;
862 }
863
864 if (bufptr - buffer + tlen > buffer_allocated)
865 {
866 unsigned int bytes;
867
868 bytes = bufptr - buffer;
869 buffer_allocated *= 2;
870 buffer = (char *) xrealloc (buffer, buffer_allocated);
871 bufptr = buffer + bytes;
872 }
873
874 for (i = 0; i < tlen; i++)
875 {
876 *bufptr++ = buf[i + offset];
877 if (buf[i + offset] == '\000')
878 {
879 nbytes_read += i + 1;
880 goto done;
881 }
882 }
883
884 memaddr += tlen;
885 len -= tlen;
886 nbytes_read += tlen;
887 }
888 done:
889 string->reset (buffer);
890 if (errnop != NULL)
891 *errnop = errcode;
892 return nbytes_read;
893 }
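/* Illustrative usage sketch, not part of the original file: a caller
   that wants a C string from the inferior, assuming ADDR holds the
   address of a NUL-terminated string no longer than 200 bytes:

     gdb::unique_xmalloc_ptr<char> str;
     int errcode;
     int nread = target_read_string (addr, &str, 200, &errcode);
     if (errcode == 0 && nread > 0)
       printf_filtered ("%s\n", str.get ());

   The unique pointer frees the buffer automatically when it goes out
   of scope.  */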
894
895 struct target_section_table *
896 target_get_section_table (struct target_ops *target)
897 {
898 return target->get_section_table ();
899 }
900
901 /* Find a section containing ADDR. */
902
903 struct target_section *
904 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
905 {
906 struct target_section_table *table = target_get_section_table (target);
907 struct target_section *secp;
908
909 if (table == NULL)
910 return NULL;
911
912 for (secp = table->sections; secp < table->sections_end; secp++)
913 {
914 if (addr >= secp->addr && addr < secp->endaddr)
915 return secp;
916 }
917 return NULL;
918 }
919
920
921 /* Helper for the memory xfer routines. Checks the attributes of the
922 memory region of MEMADDR against the read or write being attempted.
923 If the access is permitted returns true, otherwise returns false.
924 REGION_P is an optional output parameter. If not-NULL, it is
925 filled with a pointer to the memory region of MEMADDR. REG_LEN
926 returns LEN trimmed to the end of the region. This is how much the
927 caller can continue requesting, if the access is permitted. A
928 single xfer request must not straddle memory region boundaries. */
929
930 static int
931 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
932 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
933 struct mem_region **region_p)
934 {
935 struct mem_region *region;
936
937 region = lookup_mem_region (memaddr);
938
939 if (region_p != NULL)
940 *region_p = region;
941
942 switch (region->attrib.mode)
943 {
944 case MEM_RO:
945 if (writebuf != NULL)
946 return 0;
947 break;
948
949 case MEM_WO:
950 if (readbuf != NULL)
951 return 0;
952 break;
953
954 case MEM_FLASH:
955 /* We only support writing to flash during "load" for now. */
956 if (writebuf != NULL)
957 error (_("Writing to flash memory forbidden in this context"));
958 break;
959
960 case MEM_NONE:
961 return 0;
962 }
963
964 /* region->hi == 0 means there's no upper bound. */
965 if (memaddr + len < region->hi || region->hi == 0)
966 *reg_len = len;
967 else
968 *reg_len = region->hi - memaddr;
969
970 return 1;
971 }
972
973 /* Read memory from more than one valid target. A core file, for
974 instance, could have some of the memory but delegate other bits to
975 the target below it. So, we must manually try all targets. */
976
977 enum target_xfer_status
978 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
979 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
980 ULONGEST *xfered_len)
981 {
982 enum target_xfer_status res;
983
984 do
985 {
986 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
987 readbuf, writebuf, memaddr, len,
988 xfered_len);
989 if (res == TARGET_XFER_OK)
990 break;
991
992 /* Stop if the target reports that the memory is not available. */
993 if (res == TARGET_XFER_UNAVAILABLE)
994 break;
995
996 /* We want to continue past core files to executables, but not
997 past a running target's memory. */
998 if (ops->has_all_memory ())
999 break;
1000
1001 ops = ops->beneath ();
1002 }
1003 while (ops != NULL);
1004
1005 /* The cache works at the raw memory level. Make sure the cache
1006 gets updated with raw contents no matter what kind of memory
1007 object was originally being written. Note we do write-through
1008 first, so that if it fails, we don't write to the cache contents
1009 that never made it to the target. */
1010 if (writebuf != NULL
1011 && inferior_ptid != null_ptid
1012 && target_dcache_init_p ()
1013 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1014 {
1015 DCACHE *dcache = target_dcache_get ();
1016
1017 /* Note that writing to an area of memory which wasn't present
1018 in the cache doesn't cause it to be loaded in. */
1019 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1020 }
1021
1022 return res;
1023 }
1024
1025 /* Perform a partial memory transfer.
1026 For docs see target.h, to_xfer_partial. */
1027
1028 static enum target_xfer_status
1029 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1030 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1031 ULONGEST len, ULONGEST *xfered_len)
1032 {
1033 enum target_xfer_status res;
1034 ULONGEST reg_len;
1035 struct mem_region *region;
1036 struct inferior *inf;
1037
1038 /* For accesses to unmapped overlay sections, read directly from
1039 files. Must do this first, as MEMADDR may need adjustment. */
1040 if (readbuf != NULL && overlay_debugging)
1041 {
1042 struct obj_section *section = find_pc_overlay (memaddr);
1043
1044 if (pc_in_unmapped_range (memaddr, section))
1045 {
1046 struct target_section_table *table
1047 = target_get_section_table (ops);
1048 const char *section_name = section->the_bfd_section->name;
1049
1050 memaddr = overlay_mapped_address (memaddr, section);
1051 return section_table_xfer_memory_partial (readbuf, writebuf,
1052 memaddr, len, xfered_len,
1053 table->sections,
1054 table->sections_end,
1055 section_name);
1056 }
1057 }
1058
1059 /* Try the executable files, if "trust-readonly-sections" is set. */
1060 if (readbuf != NULL && trust_readonly)
1061 {
1062 struct target_section *secp;
1063 struct target_section_table *table;
1064
1065 secp = target_section_by_addr (ops, memaddr);
1066 if (secp != NULL
1067 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1068 {
1069 table = target_get_section_table (ops);
1070 return section_table_xfer_memory_partial (readbuf, writebuf,
1071 memaddr, len, xfered_len,
1072 table->sections,
1073 table->sections_end,
1074 NULL);
1075 }
1076 }
1077
1078 /* Try GDB's internal data cache. */
1079
1080 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1081 &region))
1082 return TARGET_XFER_E_IO;
1083
1084 if (inferior_ptid != null_ptid)
1085 inf = current_inferior ();
1086 else
1087 inf = NULL;
1088
1089 if (inf != NULL
1090 && readbuf != NULL
1091 /* The dcache reads whole cache lines; that doesn't play well
1092 with reading from a trace buffer, because reading outside of
1093 the collected memory range fails. */
1094 && get_traceframe_number () == -1
1095 && (region->attrib.cache
1096 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1097 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1098 {
1099 DCACHE *dcache = target_dcache_get_or_init ();
1100
1101 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1102 reg_len, xfered_len);
1103 }
1104
1105 /* If none of those methods found the memory we wanted, fall back
1106 to a target partial transfer. Normally a single call to
1107 to_xfer_partial is enough; if it doesn't recognize an object
1108 it will call the to_xfer_partial of the next target down.
1109 But for memory this won't do. Memory is the only target
1110 object which can be read from more than one valid target.
1111 A core file, for instance, could have some of the memory but
1112 delegate other bits to the target below it. So, we must
1113 manually try all targets. */
1114
1115 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1116 xfered_len);
1117
1118 /* If we still haven't got anything, return the last error. We
1119 give up. */
1120 return res;
1121 }
1122
1123 /* Perform a partial memory transfer. For docs see target.h,
1124 to_xfer_partial. */
1125
1126 static enum target_xfer_status
1127 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1128 gdb_byte *readbuf, const gdb_byte *writebuf,
1129 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1130 {
1131 enum target_xfer_status res;
1132
1133 /* Zero length requests are ok and require no work. */
1134 if (len == 0)
1135 return TARGET_XFER_EOF;
1136
1137 memaddr = address_significant (target_gdbarch (), memaddr);
1138
1139 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1140 breakpoint insns, thus hiding from higher layers whether
1141 there are software breakpoints inserted in the code stream. */
1142 if (readbuf != NULL)
1143 {
1144 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1145 xfered_len);
1146
1147 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1148 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1149 }
1150 else
1151 {
1152 /* A large write request is likely to be partially satisfied
1153 by memory_xfer_partial_1. We will continually malloc
1154 and free a copy of the entire write request for breakpoint
1155 shadow handling even though we only end up writing a small
1156 subset of it. Cap writes to a limit specified by the target
1157 to mitigate this. */
1158 len = std::min (ops->get_memory_xfer_limit (), len);
1159
1160 gdb::byte_vector buf (writebuf, writebuf + len);
1161 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1162 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1163 xfered_len);
1164 }
1165
1166 return res;
1167 }
1168
1169 scoped_restore_tmpl<int>
1170 make_scoped_restore_show_memory_breakpoints (int show)
1171 {
1172 return make_scoped_restore (&show_memory_breakpoints, show);
1173 }
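/* Illustrative usage sketch, not part of the original file: a caller
   that needs the raw contents of memory, with any software breakpoint
   instructions GDB inserted left visible, temporarily flips the flag
   through the scoped restore:

     {
       scoped_restore restore_show_memory
         = make_scoped_restore_show_memory_breakpoints (1);
       target_read_memory (addr, buf, len);
     }

   The previous setting comes back when the scope is left, so later
   reads again have breakpoint shadows applied.  */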
1174
1175 /* For docs see target.h, to_xfer_partial. */
1176
1177 enum target_xfer_status
1178 target_xfer_partial (struct target_ops *ops,
1179 enum target_object object, const char *annex,
1180 gdb_byte *readbuf, const gdb_byte *writebuf,
1181 ULONGEST offset, ULONGEST len,
1182 ULONGEST *xfered_len)
1183 {
1184 enum target_xfer_status retval;
1185
1186 /* Transfer is done when LEN is zero. */
1187 if (len == 0)
1188 return TARGET_XFER_EOF;
1189
1190 if (writebuf && !may_write_memory)
1191 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1192 core_addr_to_string_nz (offset), plongest (len));
1193
1194 *xfered_len = 0;
1195
1196 /* If this is a memory transfer, let the memory-specific code
1197 have a look at it instead. Memory transfers are more
1198 complicated. */
1199 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1200 || object == TARGET_OBJECT_CODE_MEMORY)
1201 retval = memory_xfer_partial (ops, object, readbuf,
1202 writebuf, offset, len, xfered_len);
1203 else if (object == TARGET_OBJECT_RAW_MEMORY)
1204 {
1205 /* Skip/avoid accessing the target if the memory region
1206 attributes block the access. Check this here instead of in
1207 raw_memory_xfer_partial as otherwise we'd end up checking
1208 this twice in the case the memory_xfer_partial path is
1209 taken; once before checking the dcache, and another in the
1210 tail call to raw_memory_xfer_partial. */
1211 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1212 NULL))
1213 return TARGET_XFER_E_IO;
1214
1215 /* Request the normal memory object from other layers. */
1216 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1217 xfered_len);
1218 }
1219 else
1220 retval = ops->xfer_partial (object, annex, readbuf,
1221 writebuf, offset, len, xfered_len);
1222
1223 if (targetdebug)
1224 {
1225 const unsigned char *myaddr = NULL;
1226
1227 fprintf_unfiltered (gdb_stdlog,
1228 "%s:target_xfer_partial "
1229 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1230 ops->shortname (),
1231 (int) object,
1232 (annex ? annex : "(null)"),
1233 host_address_to_string (readbuf),
1234 host_address_to_string (writebuf),
1235 core_addr_to_string_nz (offset),
1236 pulongest (len), retval,
1237 pulongest (*xfered_len));
1238
1239 if (readbuf)
1240 myaddr = readbuf;
1241 if (writebuf)
1242 myaddr = writebuf;
1243 if (retval == TARGET_XFER_OK && myaddr != NULL)
1244 {
1245 int i;
1246
1247 fputs_unfiltered (", bytes =", gdb_stdlog);
1248 for (i = 0; i < *xfered_len; i++)
1249 {
1250 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1251 {
1252 if (targetdebug < 2 && i > 0)
1253 {
1254 fprintf_unfiltered (gdb_stdlog, " ...");
1255 break;
1256 }
1257 fprintf_unfiltered (gdb_stdlog, "\n");
1258 }
1259
1260 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1261 }
1262 }
1263
1264 fputc_unfiltered ('\n', gdb_stdlog);
1265 }
1266
1267 /* Check that implementations of to_xfer_partial update *XFERED_LEN
1268 properly. Do assertion after printing debug messages, so that we
1269 can find more clues on assertion failure from debugging messages. */
1270 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1271 gdb_assert (*xfered_len > 0);
1272
1273 return retval;
1274 }
1275
1276 /* Read LEN bytes of target memory at address MEMADDR, placing the
1277 results in GDB's memory at MYADDR. Returns either 0 for success or
1278 -1 if any error occurs.
1279
1280 If an error occurs, no guarantee is made about the contents of the data at
1281 MYADDR. In particular, the caller should not depend upon partial reads
1282 filling the buffer with good data. There is no way for the caller to know
1283 how much good data might have been transferred anyway. Callers that can
1284 deal with partial reads should call target_read (which will retry until
1285 it makes no progress, and then return how much was transferred). */
1286
1287 int
1288 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1289 {
1290 if (target_read (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1291 myaddr, memaddr, len) == len)
1292 return 0;
1293 else
1294 return -1;
1295 }
1296
1297 /* See target/target.h. */
1298
1299 int
1300 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1301 {
1302 gdb_byte buf[4];
1303 int r;
1304
1305 r = target_read_memory (memaddr, buf, sizeof buf);
1306 if (r != 0)
1307 return r;
1308 *result = extract_unsigned_integer (buf, sizeof buf,
1309 gdbarch_byte_order (target_gdbarch ()));
1310 return 0;
1311 }
1312
1313 /* Like target_read_memory, but specify explicitly that this is a read
1314 from the target's raw memory. That is, this read bypasses the
1315 dcache, breakpoint shadowing, etc. */
1316
1317 int
1318 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1319 {
1320 if (target_read (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1321 myaddr, memaddr, len) == len)
1322 return 0;
1323 else
1324 return -1;
1325 }
1326
1327 /* Like target_read_memory, but specify explicitly that this is a read from
1328 the target's stack. This may trigger different cache behavior. */
1329
1330 int
1331 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1332 {
1333 if (target_read (current_top_target (), TARGET_OBJECT_STACK_MEMORY, NULL,
1334 myaddr, memaddr, len) == len)
1335 return 0;
1336 else
1337 return -1;
1338 }
1339
1340 /* Like target_read_memory, but specify explicitly that this is a read from
1341 the target's code. This may trigger different cache behavior. */
1342
1343 int
1344 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1345 {
1346 if (target_read (current_top_target (), TARGET_OBJECT_CODE_MEMORY, NULL,
1347 myaddr, memaddr, len) == len)
1348 return 0;
1349 else
1350 return -1;
1351 }
1352
1353 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1354 Returns either 0 for success or -1 if any error occurs. If an
1355 error occurs, no guarantee is made about how much data got written.
1356 Callers that can deal with partial writes should call
1357 target_write. */
1358
1359 int
1360 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1361 {
1362 if (target_write (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1363 myaddr, memaddr, len) == len)
1364 return 0;
1365 else
1366 return -1;
1367 }
1368
1369 /* Write LEN bytes from MYADDR to target raw memory at address
1370 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1371 If an error occurs, no guarantee is made about how much data got
1372 written. Callers that can deal with partial writes should call
1373 target_write. */
1374
1375 int
1376 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1377 {
1378 if (target_write (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1379 myaddr, memaddr, len) == len)
1380 return 0;
1381 else
1382 return -1;
1383 }
1384
1385 /* Fetch the target's memory map. */
1386
1387 std::vector<mem_region>
1388 target_memory_map (void)
1389 {
1390 std::vector<mem_region> result = current_top_target ()->memory_map ();
1391 if (result.empty ())
1392 return result;
1393
1394 std::sort (result.begin (), result.end ());
1395
1396 /* Check that regions do not overlap. Simultaneously assign
1397 a numbering for the "mem" commands to use to refer to
1398 each region. */
1399 mem_region *last_one = NULL;
1400 for (size_t ix = 0; ix < result.size (); ix++)
1401 {
1402 mem_region *this_one = &result[ix];
1403 this_one->number = ix;
1404
1405 if (last_one != NULL && last_one->hi > this_one->lo)
1406 {
1407 warning (_("Overlapping regions in memory map: ignoring"));
1408 return std::vector<mem_region> ();
1409 }
1410
1411 last_one = this_one;
1412 }
1413
1414 return result;
1415 }
1416
1417 void
1418 target_flash_erase (ULONGEST address, LONGEST length)
1419 {
1420 current_top_target ()->flash_erase (address, length);
1421 }
1422
1423 void
1424 target_flash_done (void)
1425 {
1426 current_top_target ()->flash_done ();
1427 }
1428
1429 static void
1430 show_trust_readonly (struct ui_file *file, int from_tty,
1431 struct cmd_list_element *c, const char *value)
1432 {
1433 fprintf_filtered (file,
1434 _("Mode for reading from readonly sections is %s.\n"),
1435 value);
1436 }
1437
1438 /* Target vector read/write partial wrapper functions. */
1439
1440 static enum target_xfer_status
1441 target_read_partial (struct target_ops *ops,
1442 enum target_object object,
1443 const char *annex, gdb_byte *buf,
1444 ULONGEST offset, ULONGEST len,
1445 ULONGEST *xfered_len)
1446 {
1447 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1448 xfered_len);
1449 }
1450
1451 static enum target_xfer_status
1452 target_write_partial (struct target_ops *ops,
1453 enum target_object object,
1454 const char *annex, const gdb_byte *buf,
1455 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1456 {
1457 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1458 xfered_len);
1459 }
1460
1461 /* Wrappers to perform the full transfer. */
1462
1463 /* For docs on target_read see target.h. */
1464
1465 LONGEST
1466 target_read (struct target_ops *ops,
1467 enum target_object object,
1468 const char *annex, gdb_byte *buf,
1469 ULONGEST offset, LONGEST len)
1470 {
1471 LONGEST xfered_total = 0;
1472 int unit_size = 1;
1473
1474 /* If we are reading from a memory object, find the length of an addressable
1475 unit for that architecture. */
1476 if (object == TARGET_OBJECT_MEMORY
1477 || object == TARGET_OBJECT_STACK_MEMORY
1478 || object == TARGET_OBJECT_CODE_MEMORY
1479 || object == TARGET_OBJECT_RAW_MEMORY)
1480 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1481
1482 while (xfered_total < len)
1483 {
1484 ULONGEST xfered_partial;
1485 enum target_xfer_status status;
1486
1487 status = target_read_partial (ops, object, annex,
1488 buf + xfered_total * unit_size,
1489 offset + xfered_total, len - xfered_total,
1490 &xfered_partial);
1491
1492 /* Call an observer, notifying them of the xfer progress? */
1493 if (status == TARGET_XFER_EOF)
1494 return xfered_total;
1495 else if (status == TARGET_XFER_OK)
1496 {
1497 xfered_total += xfered_partial;
1498 QUIT;
1499 }
1500 else
1501 return TARGET_XFER_E_IO;
1502
1503 }
1504 return len;
1505 }
1506
1507 /* Assuming that the entire [begin, end) range of memory cannot be
1508 read, try to read whatever subrange is possible to read.
1509
1510 The function returns, in RESULT, either zero or one memory block.
1511 If there's a readable subrange at the beginning, it is completely
1512 read and returned. Any further readable subrange will not be read.
1513 Otherwise, if there's a readable subrange at the end, it will be
1514 completely read and returned. Any readable subranges before it
1515 (obviously, not starting at the beginning) will be ignored. In
1516 other cases -- either no readable subrange, or readable subrange(s)
1517 that are neither at the beginning nor at the end -- nothing is returned.
1518
1519 The purpose of this function is to handle a read across a boundary
1520 of accessible memory in a case when memory map is not available.
1521 The above restrictions are fine for this case, but will give
1522 incorrect results if the memory is 'patchy'. However, supporting
1523 'patchy' memory would require trying to read every single byte,
1524 and that seems an unacceptable solution. An explicit memory map is
1525 recommended for this case -- and read_memory_robust will
1526 take care of reading multiple ranges then. */
1527
1528 static void
1529 read_whatever_is_readable (struct target_ops *ops,
1530 const ULONGEST begin, const ULONGEST end,
1531 int unit_size,
1532 std::vector<memory_read_result> *result)
1533 {
1534 ULONGEST current_begin = begin;
1535 ULONGEST current_end = end;
1536 int forward;
1537 ULONGEST xfered_len;
1538
1539 /* If we previously failed to read 1 byte, nothing can be done here. */
1540 if (end - begin <= 1)
1541 return;
1542
1543 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
1544
1545 /* Check that either the first or the last byte is readable, and give up
1546 if not. This heuristic is meant to permit reading accessible memory
1547 at the boundary of an accessible region. */
1548 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1549 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
1550 {
1551 forward = 1;
1552 ++current_begin;
1553 }
1554 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1555 buf.get () + (end - begin) - 1, end - 1, 1,
1556 &xfered_len) == TARGET_XFER_OK)
1557 {
1558 forward = 0;
1559 --current_end;
1560 }
1561 else
1562 return;
1563
1564 /* Loop invariant is that the range [current_begin, current_end) was previously
1565 found to be not readable as a whole.
1566
1567 Note loop condition -- if the range has 1 byte, we can't divide the range
1568 so there's no point trying further. */
1569 while (current_end - current_begin > 1)
1570 {
1571 ULONGEST first_half_begin, first_half_end;
1572 ULONGEST second_half_begin, second_half_end;
1573 LONGEST xfer;
1574 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
1575
1576 if (forward)
1577 {
1578 first_half_begin = current_begin;
1579 first_half_end = middle;
1580 second_half_begin = middle;
1581 second_half_end = current_end;
1582 }
1583 else
1584 {
1585 first_half_begin = middle;
1586 first_half_end = current_end;
1587 second_half_begin = current_begin;
1588 second_half_end = middle;
1589 }
1590
1591 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1592 buf.get () + (first_half_begin - begin) * unit_size,
1593 first_half_begin,
1594 first_half_end - first_half_begin);
1595
1596 if (xfer == first_half_end - first_half_begin)
1597 {
1598 /* This half reads up fine. So, the error must be in the
1599 other half. */
1600 current_begin = second_half_begin;
1601 current_end = second_half_end;
1602 }
1603 else
1604 {
1605 /* This half is not readable. Because we've tried one byte, we
1606 know some part of this half is actually readable. Go to the next
1607 iteration to divide again and try to read.
1608
1609 We don't handle the other half, because this function only tries
1610 to read a single readable subrange. */
1611 current_begin = first_half_begin;
1612 current_end = first_half_end;
1613 }
1614 }
1615
1616 if (forward)
1617 {
1618 /* The [begin, current_begin) range has been read. */
1619 result->emplace_back (begin, current_end, std::move (buf));
1620 }
1621 else
1622 {
1623 /* The [current_end, end) range has been read. */
1624 LONGEST region_len = end - current_end;
1625
1626 gdb::unique_xmalloc_ptr<gdb_byte> data
1627 ((gdb_byte *) xmalloc (region_len * unit_size));
1628 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
1629 region_len * unit_size);
1630 result->emplace_back (current_end, end, std::move (data));
1631 }
1632 }
1633
1634 std::vector<memory_read_result>
1635 read_memory_robust (struct target_ops *ops,
1636 const ULONGEST offset, const LONGEST len)
1637 {
1638 std::vector<memory_read_result> result;
1639 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1640
1641 LONGEST xfered_total = 0;
1642 while (xfered_total < len)
1643 {
1644 struct mem_region *region = lookup_mem_region (offset + xfered_total);
1645 LONGEST region_len;
1646
1647 /* If there is no explicit region, a fake one should be created. */
1648 gdb_assert (region);
1649
1650 if (region->hi == 0)
1651 region_len = len - xfered_total;
1652 else
1653 region_len = region->hi - offset;
1654
1655 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1656 {
1657 /* Cannot read this region. Note that we can end up here only
1658 if the region is explicitly marked inaccessible, or
1659 'inaccessible-by-default' is in effect. */
1660 xfered_total += region_len;
1661 }
1662 else
1663 {
1664 LONGEST to_read = std::min (len - xfered_total, region_len);
1665 gdb::unique_xmalloc_ptr<gdb_byte> buffer
1666 ((gdb_byte *) xmalloc (to_read * unit_size));
1667
1668 LONGEST xfered_partial =
1669 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
1670 offset + xfered_total, to_read);
1671 /* Call an observer, notifying them of the xfer progress? */
1672 if (xfered_partial <= 0)
1673 {
1674 /* Got an error reading full chunk. See if maybe we can read
1675 some subrange. */
1676 read_whatever_is_readable (ops, offset + xfered_total,
1677 offset + xfered_total + to_read,
1678 unit_size, &result);
1679 xfered_total += to_read;
1680 }
1681 else
1682 {
1683 result.emplace_back (offset + xfered_total,
1684 offset + xfered_total + xfered_partial,
1685 std::move (buffer));
1686 xfered_total += xfered_partial;
1687 }
1688 QUIT;
1689 }
1690 }
1691
1692 return result;
1693 }
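/* Illustrative usage sketch, not part of the original file: consumers
   such as memory dumpers iterate over whichever ranges turned out to be
   readable; process_range below is a hypothetical helper:

     std::vector<memory_read_result> ranges
       = read_memory_robust (current_top_target (), start, length);
     for (const memory_read_result &r : ranges)
       process_range (r.begin, r.end, r.data.get ());

   Any gap between consecutive ranges corresponds to memory that could
   not be read.  */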
1694
1695
1696 /* An alternative to target_write with progress callbacks. */
1697
1698 LONGEST
1699 target_write_with_progress (struct target_ops *ops,
1700 enum target_object object,
1701 const char *annex, const gdb_byte *buf,
1702 ULONGEST offset, LONGEST len,
1703 void (*progress) (ULONGEST, void *), void *baton)
1704 {
1705 LONGEST xfered_total = 0;
1706 int unit_size = 1;
1707
1708 /* If we are writing to a memory object, find the length of an addressable
1709 unit for that architecture. */
1710 if (object == TARGET_OBJECT_MEMORY
1711 || object == TARGET_OBJECT_STACK_MEMORY
1712 || object == TARGET_OBJECT_CODE_MEMORY
1713 || object == TARGET_OBJECT_RAW_MEMORY)
1714 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1715
1716 /* Give the progress callback a chance to set up. */
1717 if (progress)
1718 (*progress) (0, baton);
1719
1720 while (xfered_total < len)
1721 {
1722 ULONGEST xfered_partial;
1723 enum target_xfer_status status;
1724
1725 status = target_write_partial (ops, object, annex,
1726 buf + xfered_total * unit_size,
1727 offset + xfered_total, len - xfered_total,
1728 &xfered_partial);
1729
1730 if (status != TARGET_XFER_OK)
1731 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
1732
1733 if (progress)
1734 (*progress) (xfered_partial, baton);
1735
1736 xfered_total += xfered_partial;
1737 QUIT;
1738 }
1739 return len;
1740 }
1741
1742 /* For docs on target_write see target.h. */
1743
1744 LONGEST
1745 target_write (struct target_ops *ops,
1746 enum target_object object,
1747 const char *annex, const gdb_byte *buf,
1748 ULONGEST offset, LONGEST len)
1749 {
1750 return target_write_with_progress (ops, object, annex, buf, offset, len,
1751 NULL, NULL);
1752 }
1753
1754 /* Helper for target_read_alloc and target_read_stralloc. See their comments
1755 for details. */
1756
1757 template <typename T>
1758 gdb::optional<gdb::def_vector<T>>
1759 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1760 const char *annex)
1761 {
1762 gdb::def_vector<T> buf;
1763 size_t buf_pos = 0;
1764 const int chunk = 4096;
1765
1766 /* This function does not have a length parameter; it reads the
1767 entire OBJECT. Also, it doesn't support objects fetched partly
1768 from one target and partly from another (in a different stratum,
1769 e.g. a core file and an executable). Both reasons make it
1770 unsuitable for reading memory. */
1771 gdb_assert (object != TARGET_OBJECT_MEMORY);
1772
1773 /* Start by reading up to 4K at a time. The target will throttle
1774 this number down if necessary. */
1775 while (1)
1776 {
1777 ULONGEST xfered_len;
1778 enum target_xfer_status status;
1779
1780 buf.resize (buf_pos + chunk);
1781
1782 status = target_read_partial (ops, object, annex,
1783 (gdb_byte *) &buf[buf_pos],
1784 buf_pos, chunk,
1785 &xfered_len);
1786
1787 if (status == TARGET_XFER_EOF)
1788 {
1789 /* Read all there was. */
1790 buf.resize (buf_pos);
1791 return buf;
1792 }
1793 else if (status != TARGET_XFER_OK)
1794 {
1795 /* An error occurred. */
1796 return {};
1797 }
1798
1799 buf_pos += xfered_len;
1800
1801 QUIT;
1802 }
1803 }
1804
1805 /* See target.h. */
1806
1807 gdb::optional<gdb::byte_vector>
1808 target_read_alloc (struct target_ops *ops, enum target_object object,
1809 const char *annex)
1810 {
1811 return target_read_alloc_1<gdb_byte> (ops, object, annex);
1812 }
1813
1814 /* See target.h. */
1815
1816 gdb::optional<gdb::char_vector>
1817 target_read_stralloc (struct target_ops *ops, enum target_object object,
1818 const char *annex)
1819 {
1820 gdb::optional<gdb::char_vector> buf
1821 = target_read_alloc_1<char> (ops, object, annex);
1822
1823 if (!buf)
1824 return {};
1825
1826 if (buf->empty () || buf->back () != '\0')
1827 buf->push_back ('\0');
1828
1829 /* Check for embedded NUL bytes; but allow trailing NULs. */
1830 for (auto it = std::find (buf->begin (), buf->end (), '\0');
1831 it != buf->end (); it++)
1832 if (*it != '\0')
1833 {
1834 warning (_("target object %d, annex %s, "
1835 "contained unexpected null characters"),
1836 (int) object, annex ? annex : "(none)");
1837 break;
1838 }
1839
1840 return buf;
1841 }
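/* Illustrative usage sketch, not part of the original file: reading a
   variable-sized string object such as the target description XML,
   where consume_xml is a hypothetical consumer:

     gdb::optional<gdb::char_vector> xml
       = target_read_stralloc (current_top_target (),
                               TARGET_OBJECT_AVAILABLE_FEATURES,
                               "target.xml");
     if (xml)
       consume_xml (xml->data ());

   The returned buffer is guaranteed to be NUL-terminated; an empty
   optional indicates a read error.  */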
1842
1843 /* Memory transfer methods. */
1844
1845 void
1846 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1847 LONGEST len)
1848 {
1849 /* This method is used to read from an alternate, non-current
1850 target. This read must bypass the overlay support (as symbols
1851 don't match this target), and GDB's internal cache (wrong cache
1852 for this target). */
1853 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1854 != len)
1855 memory_error (TARGET_XFER_E_IO, addr);
1856 }
1857
1858 ULONGEST
1859 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1860 int len, enum bfd_endian byte_order)
1861 {
1862 gdb_byte buf[sizeof (ULONGEST)];
1863
1864 gdb_assert (len <= sizeof (buf));
1865 get_target_memory (ops, addr, buf, len);
1866 return extract_unsigned_integer (buf, len, byte_order);
1867 }
1868
1869 /* See target.h. */
1870
1871 int
1872 target_insert_breakpoint (struct gdbarch *gdbarch,
1873 struct bp_target_info *bp_tgt)
1874 {
1875 if (!may_insert_breakpoints)
1876 {
1877 warning (_("May not insert breakpoints"));
1878 return 1;
1879 }
1880
1881 return current_top_target ()->insert_breakpoint (gdbarch, bp_tgt);
1882 }
1883
1884 /* See target.h. */
1885
1886 int
1887 target_remove_breakpoint (struct gdbarch *gdbarch,
1888 struct bp_target_info *bp_tgt,
1889 enum remove_bp_reason reason)
1890 {
1891 /* This is kind of a weird case to handle, but the permission might
1892 have been changed after breakpoints were inserted - in which case
1893 we should just take the user literally and assume that any
1894 breakpoints should be left in place. */
1895 if (!may_insert_breakpoints)
1896 {
1897 warning (_("May not remove breakpoints"));
1898 return 1;
1899 }
1900
1901 return current_top_target ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1902 }
1903
1904 static void
1905 info_target_command (const char *args, int from_tty)
1906 {
1907 int has_all_mem = 0;
1908
1909 if (symfile_objfile != NULL)
1910 printf_unfiltered (_("Symbols from \"%s\".\n"),
1911 objfile_name (symfile_objfile));
1912
1913 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
1914 {
1915 if (!t->has_memory ())
1916 continue;
1917
1918 if ((int) (t->stratum ()) <= (int) dummy_stratum)
1919 continue;
1920 if (has_all_mem)
1921 printf_unfiltered (_("\tWhile running this, "
1922 "GDB does not access memory from...\n"));
1923 printf_unfiltered ("%s:\n", t->longname ());
1924 t->files_info ();
1925 has_all_mem = t->has_all_memory ();
1926 }
1927 }
1928
1929 /* This function is called before any new inferior is created, e.g.
1930 by running a program, attaching, or connecting to a target.
1931 It cleans up any state from previous invocations which might
1932 change between runs. This is a subset of what target_preopen
1933 resets (things which might change between targets). */
1934
1935 void
1936 target_pre_inferior (int from_tty)
1937 {
1938 /* Clear out solib state. Otherwise the solib state of the previous
1939 inferior might have survived and is entirely wrong for the new
1940 target. This has been observed on GNU/Linux using glibc 2.3. How
1941 to reproduce:
1942
1943 bash$ ./foo&
1944 [1] 4711
1945 bash$ ./foo&
1946 [2] 4712
1947 bash$ gdb ./foo
1948 [...]
1949 (gdb) attach 4711
1950 (gdb) detach
1951 (gdb) attach 4712
1952 Cannot access memory at address 0xdeadbeef
1953 */
1954
1955 /* In some OSs, the shared library list is the same/global/shared
1956 across inferiors. If code is shared between processes, so are
1957 memory regions and features. */
1958 if (!gdbarch_has_global_solist (target_gdbarch ()))
1959 {
1960 no_shared_libraries (NULL, from_tty);
1961
1962 invalidate_target_mem_regions ();
1963
1964 target_clear_description ();
1965 }
1966
1967 /* attach_flag may be set if the previous process associated with
1968 the inferior was attached to. */
1969 current_inferior ()->attach_flag = 0;
1970
1971 current_inferior ()->highest_thread_num = 0;
1972
1973 agent_capability_invalidate ();
1974 }
1975
1976 /* This is to be called by the open routine before it does
1977 anything. */
1978
1979 void
1980 target_preopen (int from_tty)
1981 {
1982 dont_repeat ();
1983
1984 if (current_inferior ()->pid != 0)
1985 {
1986 if (!from_tty
1987 || !target_has_execution
1988 || query (_("A program is being debugged already. Kill it? ")))
1989 {
1990 /* Core inferiors actually should be detached, not
1991 killed. */
1992 if (target_has_execution)
1993 target_kill ();
1994 else
1995 target_detach (current_inferior (), 0);
1996 }
1997 else
1998 error (_("Program not killed."));
1999 }
2000
2001 /* Calling target_kill may remove the target from the stack. But if
2002 it doesn't (which seems like a win for UDI), remove it now. */
2003 /* Leave the exec target, though. The user may be switching from a
2004 live process to a core of the same program. */
2005 pop_all_targets_above (file_stratum);
2006
2007 target_pre_inferior (from_tty);
2008 }
2009
2010 /* See target.h. */
2011
2012 void
2013 target_detach (inferior *inf, int from_tty)
2014 {
2015 /* After we have detached, we will clear the register cache for this inferior
2016 by calling registers_changed_ptid. We must save the pid_ptid before
2017 detaching, as the target detach method will clear inf->pid. */
2018 ptid_t save_pid_ptid = ptid_t (inf->pid);
2019
2020 /* As long as some to_detach implementations rely on the current_inferior
2021 (either directly, or indirectly, for example through target_gdbarch or
2022 by reading memory), INF needs to be the current inferior. Once that
2023 requirement no longer holds, we can remove this
2024 assertion. */
2025 gdb_assert (inf == current_inferior ());
2026
2027 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2028 /* Don't remove global breakpoints here. They're removed on
2029 disconnection from the target. */
2030 ;
2031 else
2032 /* If we're in breakpoints-always-inserted mode, have to remove
2033 breakpoints before detaching. */
2034 remove_breakpoints_inf (current_inferior ());
2035
2036 prepare_for_detach ();
2037
2038 /* Hold a strong reference because detaching may unpush the
2039 target. */
2040 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2041
2042 current_top_target ()->detach (inf, from_tty);
2043
2044 process_stratum_target *proc_target
2045 = as_process_stratum_target (proc_target_ref.get ());
2046
2047 registers_changed_ptid (proc_target, save_pid_ptid);
2048
2049 /* We have to ensure we have no frame cache left. Normally,
2050 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2051 inferior_ptid matches save_pid_ptid, but in our case, it does not
2052 call it, as inferior_ptid has been reset. */
2053 reinit_frame_cache ();
2054 }
2055
2056 void
2057 target_disconnect (const char *args, int from_tty)
2058 {
2059 /* If we're in breakpoints-always-inserted mode or if breakpoints
2060 are global across processes, we have to remove them before
2061 disconnecting. */
2062 remove_breakpoints ();
2063
2064 current_top_target ()->disconnect (args, from_tty);
2065 }
2066
2067 /* See target/target.h. */
2068
2069 ptid_t
2070 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2071 {
2072 return current_top_target ()->wait (ptid, status, options);
2073 }
2074
2075 /* See target.h. */
2076
2077 ptid_t
2078 default_target_wait (struct target_ops *ops,
2079 ptid_t ptid, struct target_waitstatus *status,
2080 int options)
2081 {
2082 status->kind = TARGET_WAITKIND_IGNORE;
2083 return minus_one_ptid;
2084 }
2085
2086 std::string
2087 target_pid_to_str (ptid_t ptid)
2088 {
2089 return current_top_target ()->pid_to_str (ptid);
2090 }
2091
2092 const char *
2093 target_thread_name (struct thread_info *info)
2094 {
2095 gdb_assert (info->inf == current_inferior ());
2096
2097 return current_top_target ()->thread_name (info);
2098 }
2099
2100 struct thread_info *
2101 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2102 int handle_len,
2103 struct inferior *inf)
2104 {
2105 return current_top_target ()->thread_handle_to_thread_info (thread_handle,
2106 handle_len, inf);
2107 }
2108
2109 /* See target.h. */
2110
2111 gdb::byte_vector
2112 target_thread_info_to_thread_handle (struct thread_info *tip)
2113 {
2114 return current_top_target ()->thread_info_to_thread_handle (tip);
2115 }
2116
2117 void
2118 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2119 {
2120 process_stratum_target *curr_target = current_inferior ()->process_target ();
2121
2122 target_dcache_invalidate ();
2123
2124 current_top_target ()->resume (ptid, step, signal);
2125
2126 registers_changed_ptid (curr_target, ptid);
2127 /* We only set the internal executing state here. The user/frontend
2128 running state is set at a higher level. This also clears the
2129 thread's stop_pc as a side effect. */
2130 set_executing (curr_target, ptid, 1);
2131 clear_inline_frame_state (curr_target, ptid);
2132 }
2133
2134 /* If true, target_commit_resume is a nop. */
2135 static int defer_target_commit_resume;
2136
2137 /* See target.h. */
2138
2139 void
2140 target_commit_resume (void)
2141 {
2142 if (defer_target_commit_resume)
2143 return;
2144
2145 current_top_target ()->commit_resume ();
2146 }
2147
2148 /* See target.h. */
2149
2150 scoped_restore_tmpl<int>
2151 make_scoped_defer_target_commit_resume ()
2152 {
2153 return make_scoped_restore (&defer_target_commit_resume, 1);
2154 }
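
/* A usage sketch of the defer mechanism (illustrative only, not taken
   from a specific caller): while the returned scoped restore is live,
   any target_commit_resume calls made underneath are no-ops, so a
   caller can batch several resumptions and commit once afterwards:

     {
       scoped_restore defer = make_scoped_defer_target_commit_resume ();
       ... resume several threads; intermediate commits are skipped ...
     }
     target_commit_resume ();
*/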
2155
2156 void
2157 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2158 {
2159 current_top_target ()->pass_signals (pass_signals);
2160 }
2161
2162 void
2163 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2164 {
2165 current_top_target ()->program_signals (program_signals);
2166 }
2167
2168 static int
2169 default_follow_fork (struct target_ops *self, int follow_child,
2170 int detach_fork)
2171 {
2172 /* Some target returned a fork event, but did not know how to follow it. */
2173 internal_error (__FILE__, __LINE__,
2174 _("could not find a target to follow fork"));
2175 }
2176
2177 /* Look through the list of possible targets for a target that can
2178 follow forks. */
2179
2180 int
2181 target_follow_fork (int follow_child, int detach_fork)
2182 {
2183 return current_top_target ()->follow_fork (follow_child, detach_fork);
2184 }
2185
2186 /* Target wrapper for follow exec hook. */
2187
2188 void
2189 target_follow_exec (struct inferior *inf, const char *execd_pathname)
2190 {
2191 current_top_target ()->follow_exec (inf, execd_pathname);
2192 }
2193
2194 static void
2195 default_mourn_inferior (struct target_ops *self)
2196 {
2197 internal_error (__FILE__, __LINE__,
2198 _("could not find a target to follow mourn inferior"));
2199 }
2200
2201 void
2202 target_mourn_inferior (ptid_t ptid)
2203 {
2204 gdb_assert (ptid == inferior_ptid);
2205 current_top_target ()->mourn_inferior ();
2206
2207 /* We no longer need to keep handles on any of the object files.
2208 Make sure to release them to avoid unnecessarily locking any
2209 of them while we're not actually debugging. */
2210 bfd_cache_close_all ();
2211 }
2212
2213 /* Look for a target which can describe architectural features, starting
2214 from TARGET. If we find one, return its description. */
2215
2216 const struct target_desc *
2217 target_read_description (struct target_ops *target)
2218 {
2219 return target->read_description ();
2220 }
2221
2222 /* This implements a basic search of memory, reading target memory and
2223 performing the search here (as opposed to performing the search on the
2224 target side with, for example, gdbserver). */
2225
2226 int
2227 simple_search_memory (struct target_ops *ops,
2228 CORE_ADDR start_addr, ULONGEST search_space_len,
2229 const gdb_byte *pattern, ULONGEST pattern_len,
2230 CORE_ADDR *found_addrp)
2231 {
2232 /* NOTE: also defined in find.c testcase. */
2233 #define SEARCH_CHUNK_SIZE 16000
2234 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2235 /* Buffer to hold memory contents for searching. */
2236 unsigned search_buf_size;
2237
2238 search_buf_size = chunk_size + pattern_len - 1;
2239
2240 /* No point in trying to allocate a buffer larger than the search space. */
2241 if (search_space_len < search_buf_size)
2242 search_buf_size = search_space_len;
2243
2244 gdb::byte_vector search_buf (search_buf_size);
2245
2246 /* Prime the search buffer. */
2247
2248 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2249 search_buf.data (), start_addr, search_buf_size)
2250 != search_buf_size)
2251 {
2252 warning (_("Unable to access %s bytes of target "
2253 "memory at %s, halting search."),
2254 pulongest (search_buf_size), hex_string (start_addr));
2255 return -1;
2256 }
2257
2258 /* Perform the search.
2259
2260 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2261 When we've scanned N bytes we copy the trailing bytes to the start and
2262 read in another N bytes. */
2263
2264 while (search_space_len >= pattern_len)
2265 {
2266 gdb_byte *found_ptr;
2267 unsigned nr_search_bytes
2268 = std::min (search_space_len, (ULONGEST) search_buf_size);
2269
2270 found_ptr = (gdb_byte *) memmem (search_buf.data (), nr_search_bytes,
2271 pattern, pattern_len);
2272
2273 if (found_ptr != NULL)
2274 {
2275 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf.data ());
2276
2277 *found_addrp = found_addr;
2278 return 1;
2279 }
2280
2281 /* Not found in this chunk, skip to next chunk. */
2282
2283 /* Don't let search_space_len wrap here, it's unsigned. */
2284 if (search_space_len >= chunk_size)
2285 search_space_len -= chunk_size;
2286 else
2287 search_space_len = 0;
2288
2289 if (search_space_len >= pattern_len)
2290 {
2291 unsigned keep_len = search_buf_size - chunk_size;
2292 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2293 int nr_to_read;
2294
2295 /* Copy the trailing part of the previous iteration to the front
2296 of the buffer for the next iteration. */
2297 gdb_assert (keep_len == pattern_len - 1);
2298 memcpy (&search_buf[0], &search_buf[chunk_size], keep_len);
2299
2300 nr_to_read = std::min (search_space_len - keep_len,
2301 (ULONGEST) chunk_size);
2302
2303 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2304 &search_buf[keep_len], read_addr,
2305 nr_to_read) != nr_to_read)
2306 {
2307 warning (_("Unable to access %s bytes of target "
2308 "memory at %s, halting search."),
2309 plongest (nr_to_read),
2310 hex_string (read_addr));
2311 return -1;
2312 }
2313
2314 start_addr += chunk_size;
2315 }
2316 }
2317
2318 /* Not found. */
2319
2320 return 0;
2321 }
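
/* Usage sketch (illustrative; not an actual caller): search a 1 MiB
   region starting at START for the three-byte pattern "GDB":

     static const gdb_byte pattern[] = { 'G', 'D', 'B' };
     CORE_ADDR found;

     int res = simple_search_memory (current_top_target (), start,
                                     1024 * 1024, pattern,
                                     sizeof (pattern), &found);

   RES is 1 and FOUND holds the first match address on success, 0 if
   the pattern is absent, and -1 if memory could not be read.  */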
2322
2323 /* Default implementation of memory-searching. */
2324
2325 static int
2326 default_search_memory (struct target_ops *self,
2327 CORE_ADDR start_addr, ULONGEST search_space_len,
2328 const gdb_byte *pattern, ULONGEST pattern_len,
2329 CORE_ADDR *found_addrp)
2330 {
2331 /* Start over from the top of the target stack. */
2332 return simple_search_memory (current_top_target (),
2333 start_addr, search_space_len,
2334 pattern, pattern_len, found_addrp);
2335 }
2336
2337 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2338 sequence of bytes in PATTERN with length PATTERN_LEN.
2339
2340 The result is 1 if found, 0 if not found, and -1 if there was an error
2341 requiring halting of the search (e.g. memory read error).
2342 If the pattern is found the address is recorded in FOUND_ADDRP. */
2343
2344 int
2345 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2346 const gdb_byte *pattern, ULONGEST pattern_len,
2347 CORE_ADDR *found_addrp)
2348 {
2349 return current_top_target ()->search_memory (start_addr, search_space_len,
2350 pattern, pattern_len, found_addrp);
2351 }
2352
2353 /* Look through the currently pushed targets. If none of them will
2354 be able to restart the currently running process, issue an error
2355 message. */
2356
2357 void
2358 target_require_runnable (void)
2359 {
2360 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2361 {
2362 /* If this target knows how to create a new program, then
2363 assume we will still be able to after killing the current
2364 one. Either killing and mourning will not pop T, or else
2365 find_default_run_target will find it again. */
2366 if (t->can_create_inferior ())
2367 return;
2368
2369 /* Do not worry about targets at certain strata that cannot
2370 create inferiors. Assume they will be pushed again if
2371 necessary, and continue to the process_stratum. */
2372 if (t->stratum () > process_stratum)
2373 continue;
2374
2375 error (_("The \"%s\" target does not support \"run\". "
2376 "Try \"help target\" or \"continue\"."),
2377 t->shortname ());
2378 }
2379
2380 /* This function is only called if the target is running. In that
2381 case there should have been a process_stratum target and it
2382 should either know how to create inferiors, or not... */
2383 internal_error (__FILE__, __LINE__, _("No targets found"));
2384 }
2385
2386 /* Whether GDB is allowed to fall back to the default run target for
2387 "run", "attach", etc. when no target is connected yet. */
2388 static bool auto_connect_native_target = true;
2389
2390 static void
2391 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2392 struct cmd_list_element *c, const char *value)
2393 {
2394 fprintf_filtered (file,
2395 _("Whether GDB may automatically connect to the "
2396 "native target is %s.\n"),
2397 value);
2398 }
2399
2400 /* A pointer to the target that can respond to "run" or "attach".
2401 Native targets are always singletons and instantiated early at GDB
2402 startup. */
2403 static target_ops *the_native_target;
2404
2405 /* See target.h. */
2406
2407 void
2408 set_native_target (target_ops *target)
2409 {
2410 if (the_native_target != NULL)
2411 internal_error (__FILE__, __LINE__,
2412 _("native target already set (\"%s\")."),
2413 the_native_target->longname ());
2414
2415 the_native_target = target;
2416 }
2417
2418 /* See target.h. */
2419
2420 target_ops *
2421 get_native_target ()
2422 {
2423 return the_native_target;
2424 }
2425
2426 /* Look through the list of possible targets for a target that can
2427 execute a run or attach command without any other data. This is
2428 used to locate the default process stratum.
2429
2430 If DO_MESG is not NULL, the result is always valid (error() is
2431 called for errors); else, return NULL on error. */
2432
2433 static struct target_ops *
2434 find_default_run_target (const char *do_mesg)
2435 {
2436 if (auto_connect_native_target && the_native_target != NULL)
2437 return the_native_target;
2438
2439 if (do_mesg != NULL)
2440 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2441 return NULL;
2442 }
2443
2444 /* See target.h. */
2445
2446 struct target_ops *
2447 find_attach_target (void)
2448 {
2449 /* If a target on the current stack can attach, use it. */
2450 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2451 {
2452 if (t->can_attach ())
2453 return t;
2454 }
2455
2456 /* Otherwise, use the default run target for attaching. */
2457 return find_default_run_target ("attach");
2458 }
2459
2460 /* See target.h. */
2461
2462 struct target_ops *
2463 find_run_target (void)
2464 {
2465 /* If a target on the current stack can run, use it. */
2466 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2467 {
2468 if (t->can_create_inferior ())
2469 return t;
2470 }
2471
2472 /* Otherwise, use the default run target. */
2473 return find_default_run_target ("run");
2474 }
2475
2476 bool
2477 target_ops::info_proc (const char *args, enum info_proc_what what)
2478 {
2479 return false;
2480 }
2481
2482 /* Implement the "info proc" command. */
2483
2484 int
2485 target_info_proc (const char *args, enum info_proc_what what)
2486 {
2487 struct target_ops *t;
2488
2489 /* If we're already connected to something that can get us OS
2490 related data, use it. Otherwise, try using the native
2491 target. */
2492 t = find_target_at (process_stratum);
2493 if (t == NULL)
2494 t = find_default_run_target (NULL);
2495
2496 for (; t != NULL; t = t->beneath ())
2497 {
2498 if (t->info_proc (args, what))
2499 {
2500 if (targetdebug)
2501 fprintf_unfiltered (gdb_stdlog,
2502 "target_info_proc (\"%s\", %d)\n", args, what);
2503
2504 return 1;
2505 }
2506 }
2507
2508 return 0;
2509 }
2510
2511 static int
2512 find_default_supports_disable_randomization (struct target_ops *self)
2513 {
2514 struct target_ops *t;
2515
2516 t = find_default_run_target (NULL);
2517 if (t != NULL)
2518 return t->supports_disable_randomization ();
2519 return 0;
2520 }
2521
2522 int
2523 target_supports_disable_randomization (void)
2524 {
2525 return current_top_target ()->supports_disable_randomization ();
2526 }
2527
2528 /* See target/target.h. */
2529
2530 int
2531 target_supports_multi_process (void)
2532 {
2533 return current_top_target ()->supports_multi_process ();
2534 }
2535
2536 /* See target.h. */
2537
2538 gdb::optional<gdb::char_vector>
2539 target_get_osdata (const char *type)
2540 {
2541 struct target_ops *t;
2542
2543 /* If we're already connected to something that can get us OS
2544 related data, use it. Otherwise, try using the native
2545 target. */
2546 t = find_target_at (process_stratum);
2547 if (t == NULL)
2548 t = find_default_run_target ("get OS data");
2549
2550 if (!t)
2551 return {};
2552
2553 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2554 }
2555
2556 /* Determine the current address space of thread PTID. */
2557
2558 struct address_space *
2559 target_thread_address_space (ptid_t ptid)
2560 {
2561 struct address_space *aspace;
2562
2563 aspace = current_top_target ()->thread_address_space (ptid);
2564 gdb_assert (aspace != NULL);
2565
2566 return aspace;
2567 }
2568
2569 /* See target.h. */
2570
2571 target_ops *
2572 target_ops::beneath () const
2573 {
2574 return current_inferior ()->find_target_beneath (this);
2575 }
2576
2577 void
2578 target_ops::close ()
2579 {
2580 }
2581
2582 bool
2583 target_ops::can_attach ()
2584 {
2585 return 0;
2586 }
2587
2588 void
2589 target_ops::attach (const char *, int)
2590 {
2591 gdb_assert_not_reached ("target_ops::attach called");
2592 }
2593
2594 bool
2595 target_ops::can_create_inferior ()
2596 {
2597 return 0;
2598 }
2599
2600 void
2601 target_ops::create_inferior (const char *, const std::string &,
2602 char **, int)
2603 {
2604 gdb_assert_not_reached ("target_ops::create_inferior called");
2605 }
2606
2607 bool
2608 target_ops::can_run ()
2609 {
2610 return false;
2611 }
2612
2613 int
2614 target_can_run ()
2615 {
2616 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2617 {
2618 if (t->can_run ())
2619 return 1;
2620 }
2621
2622 return 0;
2623 }
2624
2625 /* Target file operations. */
2626
2627 static struct target_ops *
2628 default_fileio_target (void)
2629 {
2630 struct target_ops *t;
2631
2632 /* If we're already connected to something that can perform
2633 file I/O, use it. Otherwise, try using the native target. */
2634 t = find_target_at (process_stratum);
2635 if (t != NULL)
2636 return t;
2637 return find_default_run_target ("file I/O");
2638 }
2639
2640 /* File handle for target file operations. */
2641
2642 struct fileio_fh_t
2643 {
2644 /* The target on which this file is open. NULL if the target was
2645 closed while the handle was still open. */
2646 target_ops *target;
2647
2648 /* The file descriptor on the target. */
2649 int target_fd;
2650
2651 /* Check whether this fileio_fh_t represents a closed file. */
2652 bool is_closed ()
2653 {
2654 return target_fd < 0;
2655 }
2656 };
2657
2658 /* Vector of currently open file handles. The value returned by
2659 target_fileio_open and passed as the FD argument to other
2660 target_fileio_* functions is an index into this vector. This
2661 vector's entries are never freed; instead, files are marked as
2662 closed, and the handle becomes available for reuse. */
2663 static std::vector<fileio_fh_t> fileio_fhandles;
2664
2665 /* Index into fileio_fhandles of the lowest handle that might be
2666 closed. This permits handle reuse without searching the whole
2667 list each time a new file is opened. */
2668 static int lowest_closed_fd;
2669
2670 /* Invalidate the target associated with open handles that were open
2671 on target TARG, since we're about to close (and maybe destroy) the
2672 target. The handles remain open from the client's perspective, but
2673 trying to do anything with them other than closing them will fail
2674 with EIO. */
2675
2676 static void
2677 fileio_handles_invalidate_target (target_ops *targ)
2678 {
2679 for (fileio_fh_t &fh : fileio_fhandles)
2680 if (fh.target == targ)
2681 fh.target = NULL;
2682 }
2683
2684 /* Acquire a target fileio file descriptor. */
2685
2686 static int
2687 acquire_fileio_fd (target_ops *target, int target_fd)
2688 {
2689 /* Search for closed handles to reuse. */
2690 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
2691 {
2692 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
2693
2694 if (fh.is_closed ())
2695 break;
2696 }
2697
2698 /* Push a new handle if no closed handles were found. */
2699 if (lowest_closed_fd == fileio_fhandles.size ())
2700 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
2701 else
2702 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
2703
2704 /* Should no longer be marked closed. */
2705 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
2706
2707 /* Return its index, and start the next lookup at
2708 the next index. */
2709 return lowest_closed_fd++;
2710 }
2711
2712 /* Release a target fileio file descriptor. */
2713
2714 static void
2715 release_fileio_fd (int fd, fileio_fh_t *fh)
2716 {
2717 fh->target_fd = -1;
2718 lowest_closed_fd = std::min (lowest_closed_fd, fd);
2719 }
2720
2721 /* Return a pointer to the fileio_fh_t corresponding to FD. */
2722
2723 static fileio_fh_t *
2724 fileio_fd_to_fh (int fd)
2725 {
2726 return &fileio_fhandles[fd];
2727 }
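
/* Taken together, the usual lifecycle of an entry in this table is
   (a sketch; T is some target and TARGET_FD a descriptor returned by
   its fileio_open method):

     int fd = acquire_fileio_fd (t, target_fd);
     fileio_fh_t *fh = fileio_fd_to_fh (fd);
     ... operate on fh->target / fh->target_fd ...
     release_fileio_fd (fd, fh);

   after which FD is marked closed and becomes eligible for reuse by a
   later acquire_fileio_fd call.  */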
2728
2729
2730 /* Default implementations of file i/o methods. We don't want these
2731 to delegate automatically, because we need to know which target
2732 supported the method, in order to call it directly from within
2733 pread/pwrite, etc. */
2734
2735 int
2736 target_ops::fileio_open (struct inferior *inf, const char *filename,
2737 int flags, int mode, int warn_if_slow,
2738 int *target_errno)
2739 {
2740 *target_errno = FILEIO_ENOSYS;
2741 return -1;
2742 }
2743
2744 int
2745 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2746 ULONGEST offset, int *target_errno)
2747 {
2748 *target_errno = FILEIO_ENOSYS;
2749 return -1;
2750 }
2751
2752 int
2753 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
2754 ULONGEST offset, int *target_errno)
2755 {
2756 *target_errno = FILEIO_ENOSYS;
2757 return -1;
2758 }
2759
2760 int
2761 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
2762 {
2763 *target_errno = FILEIO_ENOSYS;
2764 return -1;
2765 }
2766
2767 int
2768 target_ops::fileio_close (int fd, int *target_errno)
2769 {
2770 *target_errno = FILEIO_ENOSYS;
2771 return -1;
2772 }
2773
2774 int
2775 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
2776 int *target_errno)
2777 {
2778 *target_errno = FILEIO_ENOSYS;
2779 return -1;
2780 }
2781
2782 gdb::optional<std::string>
2783 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
2784 int *target_errno)
2785 {
2786 *target_errno = FILEIO_ENOSYS;
2787 return {};
2788 }
2789
2790 /* Helper for target_fileio_open and
2791 target_fileio_open_warn_if_slow. */
2792
2793 static int
2794 target_fileio_open_1 (struct inferior *inf, const char *filename,
2795 int flags, int mode, int warn_if_slow,
2796 int *target_errno)
2797 {
2798 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2799 {
2800 int fd = t->fileio_open (inf, filename, flags, mode,
2801 warn_if_slow, target_errno);
2802
2803 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
2804 continue;
2805
2806 if (fd < 0)
2807 fd = -1;
2808 else
2809 fd = acquire_fileio_fd (t, fd);
2810
2811 if (targetdebug)
2812 fprintf_unfiltered (gdb_stdlog,
2813 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
2814 " = %d (%d)\n",
2815 inf == NULL ? 0 : inf->num,
2816 filename, flags, mode,
2817 warn_if_slow, fd,
2818 fd != -1 ? 0 : *target_errno);
2819 return fd;
2820 }
2821
2822 *target_errno = FILEIO_ENOSYS;
2823 return -1;
2824 }
2825
2826 /* See target.h. */
2827
2828 int
2829 target_fileio_open (struct inferior *inf, const char *filename,
2830 int flags, int mode, int *target_errno)
2831 {
2832 return target_fileio_open_1 (inf, filename, flags, mode, 0,
2833 target_errno);
2834 }
2835
2836 /* See target.h. */
2837
2838 int
2839 target_fileio_open_warn_if_slow (struct inferior *inf,
2840 const char *filename,
2841 int flags, int mode, int *target_errno)
2842 {
2843 return target_fileio_open_1 (inf, filename, flags, mode, 1,
2844 target_errno);
2845 }
2846
2847 /* See target.h. */
2848
2849 int
2850 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2851 ULONGEST offset, int *target_errno)
2852 {
2853 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2854 int ret = -1;
2855
2856 if (fh->is_closed ())
2857 *target_errno = EBADF;
2858 else if (fh->target == NULL)
2859 *target_errno = EIO;
2860 else
2861 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
2862 len, offset, target_errno);
2863
2864 if (targetdebug)
2865 fprintf_unfiltered (gdb_stdlog,
2866 "target_fileio_pwrite (%d,...,%d,%s) "
2867 "= %d (%d)\n",
2868 fd, len, pulongest (offset),
2869 ret, ret != -1 ? 0 : *target_errno);
2870 return ret;
2871 }
2872
2873 /* See target.h. */
2874
2875 int
2876 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2877 ULONGEST offset, int *target_errno)
2878 {
2879 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2880 int ret = -1;
2881
2882 if (fh->is_closed ())
2883 *target_errno = EBADF;
2884 else if (fh->target == NULL)
2885 *target_errno = EIO;
2886 else
2887 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
2888 len, offset, target_errno);
2889
2890 if (targetdebug)
2891 fprintf_unfiltered (gdb_stdlog,
2892 "target_fileio_pread (%d,...,%d,%s) "
2893 "= %d (%d)\n",
2894 fd, len, pulongest (offset),
2895 ret, ret != -1 ? 0 : *target_errno);
2896 return ret;
2897 }
2898
2899 /* See target.h. */
2900
2901 int
2902 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
2903 {
2904 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2905 int ret = -1;
2906
2907 if (fh->is_closed ())
2908 *target_errno = EBADF;
2909 else if (fh->target == NULL)
2910 *target_errno = EIO;
2911 else
2912 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
2913
2914 if (targetdebug)
2915 fprintf_unfiltered (gdb_stdlog,
2916 "target_fileio_fstat (%d) = %d (%d)\n",
2917 fd, ret, ret != -1 ? 0 : *target_errno);
2918 return ret;
2919 }
2920
2921 /* See target.h. */
2922
2923 int
2924 target_fileio_close (int fd, int *target_errno)
2925 {
2926 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2927 int ret = -1;
2928
2929 if (fh->is_closed ())
2930 *target_errno = EBADF;
2931 else
2932 {
2933 if (fh->target != NULL)
2934 ret = fh->target->fileio_close (fh->target_fd,
2935 target_errno);
2936 else
2937 ret = 0;
2938 release_fileio_fd (fd, fh);
2939 }
2940
2941 if (targetdebug)
2942 fprintf_unfiltered (gdb_stdlog,
2943 "target_fileio_close (%d) = %d (%d)\n",
2944 fd, ret, ret != -1 ? 0 : *target_errno);
2945 return ret;
2946 }
2947
2948 /* See target.h. */
2949
2950 int
2951 target_fileio_unlink (struct inferior *inf, const char *filename,
2952 int *target_errno)
2953 {
2954 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2955 {
2956 int ret = t->fileio_unlink (inf, filename, target_errno);
2957
2958 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
2959 continue;
2960
2961 if (targetdebug)
2962 fprintf_unfiltered (gdb_stdlog,
2963 "target_fileio_unlink (%d,%s)"
2964 " = %d (%d)\n",
2965 inf == NULL ? 0 : inf->num, filename,
2966 ret, ret != -1 ? 0 : *target_errno);
2967 return ret;
2968 }
2969
2970 *target_errno = FILEIO_ENOSYS;
2971 return -1;
2972 }
2973
2974 /* See target.h. */
2975
2976 gdb::optional<std::string>
2977 target_fileio_readlink (struct inferior *inf, const char *filename,
2978 int *target_errno)
2979 {
2980 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2981 {
2982 gdb::optional<std::string> ret
2983 = t->fileio_readlink (inf, filename, target_errno);
2984
2985 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
2986 continue;
2987
2988 if (targetdebug)
2989 fprintf_unfiltered (gdb_stdlog,
2990 "target_fileio_readlink (%d,%s)"
2991 " = %s (%d)\n",
2992 inf == NULL ? 0 : inf->num,
2993 filename, ret ? ret->c_str () : "(nil)",
2994 ret ? 0 : *target_errno);
2995 return ret;
2996 }
2997
2998 *target_errno = FILEIO_ENOSYS;
2999 return {};
3000 }
3001
3002 /* Like scoped_fd, but specific to target fileio. */
3003
3004 class scoped_target_fd
3005 {
3006 public:
3007 explicit scoped_target_fd (int fd) noexcept
3008 : m_fd (fd)
3009 {
3010 }
3011
3012 ~scoped_target_fd ()
3013 {
3014 if (m_fd >= 0)
3015 {
3016 int target_errno;
3017
3018 target_fileio_close (m_fd, &target_errno);
3019 }
3020 }
3021
3022 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3023
3024 int get () const noexcept
3025 {
3026 return m_fd;
3027 }
3028
3029 private:
3030 int m_fd;
3031 };
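
/* A usage sketch (mirroring target_fileio_read_alloc_1 below; the
   file name is hypothetical): the descriptor is closed automatically
   when FD goes out of scope, including on early returns:

     int target_errno;
     scoped_target_fd fd (target_fileio_open (NULL, "/etc/motd",
                                              FILEIO_O_RDONLY, 0700,
                                              &target_errno));
     if (fd.get () == -1)
       return;
     ... use fd.get () with target_fileio_pread etc. ...
*/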
3032
3033 /* Read target file FILENAME, in the filesystem as seen by INF. If
3034 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3035 remote targets, the remote stub). Store the result in *BUF_P and
3036 return the size of the transferred data. PADDING additional bytes
3037 are available in *BUF_P. This is a helper function for
3038 target_fileio_read_alloc; see the declaration of that function for
3039 more information. */
3040
3041 static LONGEST
3042 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3043 gdb_byte **buf_p, int padding)
3044 {
3045 size_t buf_alloc, buf_pos;
3046 gdb_byte *buf;
3047 LONGEST n;
3048 int target_errno;
3049
3050 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3051 0700, &target_errno));
3052 if (fd.get () == -1)
3053 return -1;
3054
3055 /* Start by reading up to 4K at a time. The target will throttle
3056 this number down if necessary. */
3057 buf_alloc = 4096;
3058 buf = (gdb_byte *) xmalloc (buf_alloc);
3059 buf_pos = 0;
3060 while (1)
3061 {
3062 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3063 buf_alloc - buf_pos - padding, buf_pos,
3064 &target_errno);
3065 if (n < 0)
3066 {
3067 /* An error occurred. */
3068 xfree (buf);
3069 return -1;
3070 }
3071 else if (n == 0)
3072 {
3073 /* Read all there was. */
3074 if (buf_pos == 0)
3075 xfree (buf);
3076 else
3077 *buf_p = buf;
3078 return buf_pos;
3079 }
3080
3081 buf_pos += n;
3082
3083 /* If the buffer is filling up, expand it. */
3084 if (buf_alloc < buf_pos * 2)
3085 {
3086 buf_alloc *= 2;
3087 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3088 }
3089
3090 QUIT;
3091 }
3092 }
3093
3094 /* See target.h. */
3095
3096 LONGEST
3097 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3098 gdb_byte **buf_p)
3099 {
3100 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3101 }
3102
3103 /* See target.h. */
3104
3105 gdb::unique_xmalloc_ptr<char>
3106 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3107 {
3108 gdb_byte *buffer;
3109 char *bufstr;
3110 LONGEST i, transferred;
3111
3112 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3113 bufstr = (char *) buffer;
3114
3115 if (transferred < 0)
3116 return gdb::unique_xmalloc_ptr<char> (nullptr);
3117
3118 if (transferred == 0)
3119 return make_unique_xstrdup ("");
3120
3121 bufstr[transferred] = 0;
3122
3123 /* Check for embedded NUL bytes; but allow trailing NULs. */
3124 for (i = strlen (bufstr); i < transferred; i++)
3125 if (bufstr[i] != 0)
3126 {
3127 warning (_("target file %s "
3128 "contained unexpected null characters"),
3129 filename);
3130 break;
3131 }
3132
3133 return gdb::unique_xmalloc_ptr<char> (bufstr);
3134 }
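
/* For instance (an illustrative sketch; the path is hypothetical), a
   caller can read a text file from the inferior's filesystem as a
   NUL-terminated string, getting a NULL pointer back on error:

     gdb::unique_xmalloc_ptr<char> text
       = target_fileio_read_stralloc (current_inferior (), "/proc/version");
     if (text != nullptr)
       printf_unfiltered ("%s", text.get ());
*/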
3135
3136
3137 static int
3138 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3139 CORE_ADDR addr, int len)
3140 {
3141 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3142 }
3143
3144 static int
3145 default_watchpoint_addr_within_range (struct target_ops *target,
3146 CORE_ADDR addr,
3147 CORE_ADDR start, int length)
3148 {
3149 return addr >= start && addr < start + length;
3150 }
3151
3152 /* See target.h. */
3153
3154 target_ops *
3155 target_stack::find_beneath (const target_ops *t) const
3156 {
3157 /* Look for a non-empty slot at stratum levels beneath T's. */
3158 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3159 if (m_stack[stratum] != NULL)
3160 return m_stack[stratum];
3161
3162 return NULL;
3163 }
3164
3165 /* See target.h. */
3166
3167 struct target_ops *
3168 find_target_at (enum strata stratum)
3169 {
3170 return current_inferior ()->target_at (stratum);
3171 }
3172
3173 \f
3174
3175 /* See target.h.  */
3176
3177 void
3178 target_announce_detach (int from_tty)
3179 {
3180 pid_t pid;
3181 const char *exec_file;
3182
3183 if (!from_tty)
3184 return;
3185
3186 exec_file = get_exec_file (0);
3187 if (exec_file == NULL)
3188 exec_file = "";
3189
3190 pid = inferior_ptid.pid ();
3191 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3192 target_pid_to_str (ptid_t (pid)).c_str ());
3193 }
3194
3195 /* The inferior process has died. Long live the inferior! */
3196
3197 void
3198 generic_mourn_inferior (void)
3199 {
3200 inferior *inf = current_inferior ();
3201
3202 inferior_ptid = null_ptid;
3203
3204 /* Mark breakpoints uninserted in case something tries to delete a
3205 breakpoint while we delete the inferior's threads (which would
3206 fail, since the inferior is long gone). */
3207 mark_breakpoints_out ();
3208
3209 if (inf->pid != 0)
3210 exit_inferior (inf);
3211
3212 /* Note this wipes step-resume breakpoints, so needs to be done
3213 after exit_inferior, which ends up referencing the step-resume
3214 breakpoints through clear_thread_inferior_resources. */
3215 breakpoint_init_inferior (inf_exited);
3216
3217 registers_changed ();
3218
3219 reopen_exec_file ();
3220 reinit_frame_cache ();
3221
3222 if (deprecated_detach_hook)
3223 deprecated_detach_hook ();
3224 }
3225 \f
3226 /* Convert a normal process ID to a string, such as "process 1234".
3227 The result is returned as a std::string. */
3228
3229 std::string
3230 normal_pid_to_str (ptid_t ptid)
3231 {
3232 return string_printf ("process %d", ptid.pid ());
3233 }
3234
3235 static std::string
3236 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3237 {
3238 return normal_pid_to_str (ptid);
3239 }
3240
3241 /* Error-catcher for target_find_memory_regions. */
3242 static int
3243 dummy_find_memory_regions (struct target_ops *self,
3244 find_memory_region_ftype ignore1, void *ignore2)
3245 {
3246 error (_("Command not implemented for this target."));
3247 return 0;
3248 }
3249
3250 /* Error-catcher for target_make_corefile_notes. */
3251 static char *
3252 dummy_make_corefile_notes (struct target_ops *self,
3253 bfd *ignore1, int *ignore2)
3254 {
3255 error (_("Command not implemented for this target."));
3256 return NULL;
3257 }
3258
3259 #include "target-delegates.c"
3260
3261 /* The initial current target, so that there is always a semi-valid
3262 current target. */
3263
3264 static dummy_target the_dummy_target;
3265
3266 /* See target.h. */
3267
3268 target_ops *
3269 get_dummy_target ()
3270 {
3271 return &the_dummy_target;
3272 }
3273
3274 static const target_info dummy_target_info = {
3275 "None",
3276 N_("None"),
3277 ""
3278 };
3279
3280 strata
3281 dummy_target::stratum () const
3282 {
3283 return dummy_stratum;
3284 }
3285
3286 strata
3287 debug_target::stratum () const
3288 {
3289 return debug_stratum;
3290 }
3291
3292 const target_info &
3293 dummy_target::info () const
3294 {
3295 return dummy_target_info;
3296 }
3297
3298 const target_info &
3299 debug_target::info () const
3300 {
3301 return beneath ()->info ();
3302 }
3303
3304 \f
3305
3306 void
3307 target_close (struct target_ops *targ)
3308 {
3309 gdb_assert (!target_is_pushed (targ));
3310
3311 fileio_handles_invalidate_target (targ);
3312
3313 targ->close ();
3314
3315 if (targetdebug)
3316 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3317 }
3318
3319 int
3320 target_thread_alive (ptid_t ptid)
3321 {
3322 return current_top_target ()->thread_alive (ptid);
3323 }
3324
3325 void
3326 target_update_thread_list (void)
3327 {
3328 current_top_target ()->update_thread_list ();
3329 }
3330
3331 void
3332 target_stop (ptid_t ptid)
3333 {
3334 if (!may_stop)
3335 {
3336 warning (_("May not interrupt or stop the target, ignoring attempt"));
3337 return;
3338 }
3339
3340 current_top_target ()->stop (ptid);
3341 }
3342
3343 void
3344 target_interrupt ()
3345 {
3346 if (!may_stop)
3347 {
3348 warning (_("May not interrupt or stop the target, ignoring attempt"));
3349 return;
3350 }
3351
3352 current_top_target ()->interrupt ();
3353 }
3354
3355 /* See target.h. */
3356
3357 void
3358 target_pass_ctrlc (void)
3359 {
3360 /* Pass the Ctrl-C to the first target that has a thread
3361 running. */
3362 for (inferior *inf : all_inferiors ())
3363 {
3364 target_ops *proc_target = inf->process_target ();
3365 if (proc_target == NULL)
3366 continue;
3367
3368 for (thread_info *thr : inf->threads ())
3369 {
3370 /* A thread can be THREAD_STOPPED and executing, while
3371 running an infcall. */
3372 if (thr->state == THREAD_RUNNING || thr->executing)
3373 {
3374 /* We can get here quite deep in target layers. Avoid
3375 switching thread context or anything that would
3376 communicate with the target (e.g., to fetch
3377 registers), or flushing e.g., the frame cache. We
3378 just switch inferior in order to be able to call
3379 through the target_stack. */
3380 scoped_restore_current_inferior restore_inferior;
3381 set_current_inferior (inf);
3382 current_top_target ()->pass_ctrlc ();
3383 return;
3384 }
3385 }
3386 }
3387 }
3388
3389 /* See target.h. */
3390
3391 void
3392 default_target_pass_ctrlc (struct target_ops *ops)
3393 {
3394 target_interrupt ();
3395 }
3396
3397 /* See target/target.h. */
3398
3399 void
3400 target_stop_and_wait (ptid_t ptid)
3401 {
3402 struct target_waitstatus status;
3403 bool was_non_stop = non_stop;
3404
3405 non_stop = true;
3406 target_stop (ptid);
3407
3408 memset (&status, 0, sizeof (status));
3409 target_wait (ptid, &status, 0);
3410
3411 non_stop = was_non_stop;
3412 }
3413
3414 /* See target/target.h. */
3415
3416 void
3417 target_continue_no_signal (ptid_t ptid)
3418 {
3419 target_resume (ptid, 0, GDB_SIGNAL_0);
3420 }
3421
3422 /* See target/target.h. */
3423
3424 void
3425 target_continue (ptid_t ptid, enum gdb_signal signal)
3426 {
3427 target_resume (ptid, 0, signal);
3428 }
3429
3430 /* Concatenate ELEM to LIST, a comma-separated list. */
3431
3432 static void
3433 str_comma_list_concat_elem (std::string *list, const char *elem)
3434 {
3435 if (!list->empty ())
3436 list->append (", ");
3437
3438 list->append (elem);
3439 }
3440
3441 /* Helper for target_options_to_string. If OPT is present in
3442 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3443 OPT is removed from TARGET_OPTIONS. */
3444
3445 static void
3446 do_option (int *target_options, std::string *ret,
3447 int opt, const char *opt_str)
3448 {
3449 if ((*target_options & opt) != 0)
3450 {
3451 str_comma_list_concat_elem (ret, opt_str);
3452 *target_options &= ~opt;
3453 }
3454 }
3455
3456 /* See target.h. */
3457
3458 std::string
3459 target_options_to_string (int target_options)
3460 {
3461 std::string ret;
3462
3463 #define DO_TARG_OPTION(OPT) \
3464 do_option (&target_options, &ret, OPT, #OPT)
3465
3466 DO_TARG_OPTION (TARGET_WNOHANG);
3467
3468 if (target_options != 0)
3469 str_comma_list_concat_elem (&ret, "unknown???");
3470
3471 return ret;
3472 }
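
/* For example (a sketch of the resulting strings):

     target_options_to_string (TARGET_WNOHANG)  => "TARGET_WNOHANG"
     target_options_to_string (0)               => ""

   Any bit without a DO_TARG_OPTION line above is reported as
   "unknown???".  */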
3473
3474 void
3475 target_fetch_registers (struct regcache *regcache, int regno)
3476 {
3477 current_top_target ()->fetch_registers (regcache, regno);
3478 if (targetdebug)
3479 regcache->debug_print_register ("target_fetch_registers", regno);
3480 }
3481
3482 void
3483 target_store_registers (struct regcache *regcache, int regno)
3484 {
3485 if (!may_write_registers)
3486 error (_("Writing to registers is not allowed (regno %d)"), regno);
3487
3488 current_top_target ()->store_registers (regcache, regno);
3489 if (targetdebug)
3490 {
3491 regcache->debug_print_register ("target_store_registers", regno);
3492 }
3493 }
3494
3495 int
3496 target_core_of_thread (ptid_t ptid)
3497 {
3498 return current_top_target ()->core_of_thread (ptid);
3499 }
3500
3501 int
3502 simple_verify_memory (struct target_ops *ops,
3503 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3504 {
3505 LONGEST total_xfered = 0;
3506
3507 while (total_xfered < size)
3508 {
3509 ULONGEST xfered_len;
3510 enum target_xfer_status status;
3511 gdb_byte buf[1024];
3512 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3513
3514 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3515 buf, NULL, lma + total_xfered, howmuch,
3516 &xfered_len);
3517 if (status == TARGET_XFER_OK
3518 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3519 {
3520 total_xfered += xfered_len;
3521 QUIT;
3522 }
3523 else
3524 return 0;
3525 }
3526 return 1;
3527 }
3528
3529 /* Default implementation of memory verification. */
3530
3531 static int
3532 default_verify_memory (struct target_ops *self,
3533 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3534 {
3535 /* Start over from the top of the target stack. */
3536 return simple_verify_memory (current_top_target (),
3537 data, memaddr, size);
3538 }
3539
3540 int
3541 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3542 {
3543 return current_top_target ()->verify_memory (data, memaddr, size);
3544 }
3545
3546 /* The documentation for this function is in its prototype declaration in
3547 target.h. */
3548
3549 int
3550 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3551 enum target_hw_bp_type rw)
3552 {
3553 return current_top_target ()->insert_mask_watchpoint (addr, mask, rw);
3554 }
3555
3556 /* The documentation for this function is in its prototype declaration in
3557 target.h. */
3558
3559 int
3560 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3561 enum target_hw_bp_type rw)
3562 {
3563 return current_top_target ()->remove_mask_watchpoint (addr, mask, rw);
3564 }
3565
3566 /* The documentation for this function is in its prototype declaration
3567 in target.h. */
3568
3569 int
3570 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3571 {
3572 return current_top_target ()->masked_watch_num_registers (addr, mask);
3573 }
3574
3575 /* The documentation for this function is in its prototype declaration
3576 in target.h. */
3577
3578 int
3579 target_ranged_break_num_registers (void)
3580 {
3581 return current_top_target ()->ranged_break_num_registers ();
3582 }
3583
3584 /* See target.h. */
3585
3586 struct btrace_target_info *
3587 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
3588 {
3589 return current_top_target ()->enable_btrace (ptid, conf);
3590 }
3591
3592 /* See target.h. */
3593
3594 void
3595 target_disable_btrace (struct btrace_target_info *btinfo)
3596 {
3597 current_top_target ()->disable_btrace (btinfo);
3598 }
3599
3600 /* See target.h. */
3601
3602 void
3603 target_teardown_btrace (struct btrace_target_info *btinfo)
3604 {
3605 current_top_target ()->teardown_btrace (btinfo);
3606 }
3607
3608 /* See target.h. */
3609
3610 enum btrace_error
3611 target_read_btrace (struct btrace_data *btrace,
3612 struct btrace_target_info *btinfo,
3613 enum btrace_read_type type)
3614 {
3615 return current_top_target ()->read_btrace (btrace, btinfo, type);
3616 }
3617
3618 /* See target.h. */
3619
3620 const struct btrace_config *
3621 target_btrace_conf (const struct btrace_target_info *btinfo)
3622 {
3623 return current_top_target ()->btrace_conf (btinfo);
3624 }
3625
3626 /* See target.h. */
3627
3628 void
3629 target_stop_recording (void)
3630 {
3631 current_top_target ()->stop_recording ();
3632 }
3633
3634 /* See target.h. */
3635
3636 void
3637 target_save_record (const char *filename)
3638 {
3639 current_top_target ()->save_record (filename);
3640 }
3641
3642 /* See target.h. */
3643
3644 int
3645 target_supports_delete_record ()
3646 {
3647 return current_top_target ()->supports_delete_record ();
3648 }
3649
3650 /* See target.h. */
3651
3652 void
3653 target_delete_record (void)
3654 {
3655 current_top_target ()->delete_record ();
3656 }
3657
3658 /* See target.h. */
3659
3660 enum record_method
3661 target_record_method (ptid_t ptid)
3662 {
3663 return current_top_target ()->record_method (ptid);
3664 }
3665
3666 /* See target.h. */
3667
3668 int
3669 target_record_is_replaying (ptid_t ptid)
3670 {
3671 return current_top_target ()->record_is_replaying (ptid);
3672 }
3673
3674 /* See target.h. */
3675
3676 int
3677 target_record_will_replay (ptid_t ptid, int dir)
3678 {
3679 return current_top_target ()->record_will_replay (ptid, dir);
3680 }
3681
3682 /* See target.h. */
3683
3684 void
3685 target_record_stop_replaying (void)
3686 {
3687 current_top_target ()->record_stop_replaying ();
3688 }
3689
3690 /* See target.h. */
3691
3692 void
3693 target_goto_record_begin (void)
3694 {
3695 current_top_target ()->goto_record_begin ();
3696 }
3697
3698 /* See target.h. */
3699
3700 void
3701 target_goto_record_end (void)
3702 {
3703 current_top_target ()->goto_record_end ();
3704 }
3705
3706 /* See target.h. */
3707
3708 void
3709 target_goto_record (ULONGEST insn)
3710 {
3711 current_top_target ()->goto_record (insn);
3712 }
3713
3714 /* See target.h. */
3715
3716 void
3717 target_insn_history (int size, gdb_disassembly_flags flags)
3718 {
3719 current_top_target ()->insn_history (size, flags);
3720 }
3721
3722 /* See target.h. */
3723
3724 void
3725 target_insn_history_from (ULONGEST from, int size,
3726 gdb_disassembly_flags flags)
3727 {
3728 current_top_target ()->insn_history_from (from, size, flags);
3729 }
3730
3731 /* See target.h. */
3732
3733 void
3734 target_insn_history_range (ULONGEST begin, ULONGEST end,
3735 gdb_disassembly_flags flags)
3736 {
3737 current_top_target ()->insn_history_range (begin, end, flags);
3738 }
3739
3740 /* See target.h. */
3741
3742 void
3743 target_call_history (int size, record_print_flags flags)
3744 {
3745 current_top_target ()->call_history (size, flags);
3746 }
3747
3748 /* See target.h. */
3749
3750 void
3751 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
3752 {
3753 current_top_target ()->call_history_from (begin, size, flags);
3754 }
3755
3756 /* See target.h. */
3757
3758 void
3759 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
3760 {
3761 current_top_target ()->call_history_range (begin, end, flags);
3762 }
3763
3764 /* See target.h. */
3765
3766 const struct frame_unwind *
3767 target_get_unwinder (void)
3768 {
3769 return current_top_target ()->get_unwinder ();
3770 }
3771
3772 /* See target.h. */
3773
3774 const struct frame_unwind *
3775 target_get_tailcall_unwinder (void)
3776 {
3777 return current_top_target ()->get_tailcall_unwinder ();
3778 }
3779
3780 /* See target.h. */
3781
3782 void
3783 target_prepare_to_generate_core (void)
3784 {
3785 current_top_target ()->prepare_to_generate_core ();
3786 }
3787
3788 /* See target.h. */
3789
3790 void
3791 target_done_generating_core (void)
3792 {
3793 current_top_target ()->done_generating_core ();
3794 }
3795
3796 \f
3797
3798 static char targ_desc[] =
3799 "Names of targets and files being debugged.\nShows the entire \
3800 stack of targets currently in use (including the exec-file,\n\
3801 core-file, and process, if any), as well as the symbol file name.";
3802
3803 static void
3804 default_rcmd (struct target_ops *self, const char *command,
3805 struct ui_file *output)
3806 {
3807 error (_("\"monitor\" command not supported by this target."));
3808 }
3809
3810 static void
3811 do_monitor_command (const char *cmd, int from_tty)
3812 {
3813 target_rcmd (cmd, gdb_stdtarg);
3814 }
3815
3816 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
3817 ignored. */
3818
3819 void
3820 flash_erase_command (const char *cmd, int from_tty)
3821 {
3822 /* Tracks whether any flash region was erased, so we know whether to finalize flash operations on the target. */
3823 bool found_flash_region = false;
3824 struct gdbarch *gdbarch = target_gdbarch ();
3825
3826 std::vector<mem_region> mem_regions = target_memory_map ();
3827
3828 /* Iterate over all memory regions. */
3829 for (const mem_region &m : mem_regions)
3830 {
3831 /* Is this a flash memory region? */
3832 if (m.attrib.mode == MEM_FLASH)
3833 {
3834 found_flash_region = true;
3835 target_flash_erase (m.lo, m.hi - m.lo);
3836
3837 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
3838
3839 current_uiout->message (_("Erasing flash memory region at address "));
3840 current_uiout->field_core_addr ("address", gdbarch, m.lo);
3841 current_uiout->message (", size = ");
3842 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
3843 current_uiout->message ("\n");
3844 }
3845 }
3846
3847 /* Did we do any flash operations? If so, we need to finalize them. */
3848 if (found_flash_region)
3849 target_flash_done ();
3850 else
3851 current_uiout->message (_("No flash memory regions found.\n"));
3852 }
3853
3854 /* Print the name of each layer of our target stack. */
3855
3856 static void
3857 maintenance_print_target_stack (const char *cmd, int from_tty)
3858 {
3859 printf_filtered (_("The current target stack is:\n"));
3860
3861 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
3862 {
3863 if (t->stratum () == debug_stratum)
3864 continue;
3865 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
3866 }
3867 }
3868
3869 /* See target.h. */
3870
3871 void
3872 target_async (int enable)
3873 {
3874 infrun_async (enable);
3875 current_top_target ()->async (enable);
3876 }
3877
3878 /* See target.h. */
3879
3880 void
3881 target_thread_events (int enable)
3882 {
3883 current_top_target ()->thread_events (enable);
3884 }
3885
3886 /* Controls if targets can report that they can/are async. This is
3887 just for maintainers to use when debugging gdb. */
3888 bool target_async_permitted = true;
3889
3890 /* The set command writes to this variable. If the inferior is
3891 executing, target_async_permitted is *not* updated. */
3892 static bool target_async_permitted_1 = true;
3893
3894 static void
3895 maint_set_target_async_command (const char *args, int from_tty,
3896 struct cmd_list_element *c)
3897 {
3898 if (have_live_inferiors ())
3899 {
3900 target_async_permitted_1 = target_async_permitted;
3901 error (_("Cannot change this setting while the inferior is running."));
3902 }
3903
3904 target_async_permitted = target_async_permitted_1;
3905 }
3906
3907 static void
3908 maint_show_target_async_command (struct ui_file *file, int from_tty,
3909 struct cmd_list_element *c,
3910 const char *value)
3911 {
3912 fprintf_filtered (file,
3913 _("Controlling the inferior in "
3914 "asynchronous mode is %s.\n"), value);
3915 }
3916
3917 /* Return true if the target operates in non-stop mode even with "set
3918 non-stop off". */
3919
3920 static int
3921 target_always_non_stop_p (void)
3922 {
3923 return current_top_target ()->always_non_stop_p ();
3924 }
3925
3926 /* See target.h. */
3927
3928 int
3929 target_is_non_stop_p (void)
3930 {
3931 return (non_stop
3932 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
3933 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
3934 && target_always_non_stop_p ()));
3935 }
3936
3937 /* Controls if targets can report that they always run in non-stop
3938 mode. This is just for maintainers to use when debugging gdb. */
3939 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
3940
3941 /* The set command writes to this variable. If the inferior is
3942 executing, target_non_stop_enabled is *not* updated. */
3943 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
3944
3945 /* Implementation of "maint set target-non-stop". */
3946
3947 static void
3948 maint_set_target_non_stop_command (const char *args, int from_tty,
3949 struct cmd_list_element *c)
3950 {
3951 if (have_live_inferiors ())
3952 {
3953 target_non_stop_enabled_1 = target_non_stop_enabled;
3954 error (_("Cannot change this setting while the inferior is running."));
3955 }
3956
3957 target_non_stop_enabled = target_non_stop_enabled_1;
3958 }
3959
3960 /* Implementation of "maint show target-non-stop". */
3961
3962 static void
3963 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
3964 struct cmd_list_element *c,
3965 const char *value)
3966 {
3967 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
3968 fprintf_filtered (file,
3969 _("Whether the target is always in non-stop mode "
3970 "is %s (currently %s).\n"), value,
3971 target_always_non_stop_p () ? "on" : "off");
3972 else
3973 fprintf_filtered (file,
3974 _("Whether the target is always in non-stop mode "
3975 "is %s.\n"), value);
3976 }
3977
3978 /* Temporary copies of permission settings. */
3979
3980 static bool may_write_registers_1 = true;
3981 static bool may_write_memory_1 = true;
3982 static bool may_insert_breakpoints_1 = true;
3983 static bool may_insert_tracepoints_1 = true;
3984 static bool may_insert_fast_tracepoints_1 = true;
3985 static bool may_stop_1 = true;
3986
3987 /* Make the user-set values match the real values again. */
3988
3989 void
3990 update_target_permissions (void)
3991 {
3992 may_write_registers_1 = may_write_registers;
3993 may_write_memory_1 = may_write_memory;
3994 may_insert_breakpoints_1 = may_insert_breakpoints;
3995 may_insert_tracepoints_1 = may_insert_tracepoints;
3996 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3997 may_stop_1 = may_stop;
3998 }
3999
4000 /* This one function handles (most of) the permission flags in the same
4001 way. */
4002
4003 static void
4004 set_target_permissions (const char *args, int from_tty,
4005 struct cmd_list_element *c)
4006 {
4007 if (target_has_execution)
4008 {
4009 update_target_permissions ();
4010 error (_("Cannot change this setting while the inferior is running."));
4011 }
4012
4013 /* Make the real values match the user-changed values. */
4014 may_write_registers = may_write_registers_1;
4015 may_insert_breakpoints = may_insert_breakpoints_1;
4016 may_insert_tracepoints = may_insert_tracepoints_1;
4017 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4018 may_stop = may_stop_1;
4019 update_observer_mode ();
4020 }
4021
4022 /* Set memory write permission independently of observer mode. */
4023
4024 static void
4025 set_write_memory_permission (const char *args, int from_tty,
4026 struct cmd_list_element *c)
4027 {
4028 /* Make the real values match the user-changed values. */
4029 may_write_memory = may_write_memory_1;
4030 update_observer_mode ();
4031 }
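
/* Illustrative only: the permission flags committed above are consulted
   by the real target access paths (memory writes, for instance, error
   out when may_write_memory is off).  A hypothetical wrapper that makes
   the check explicit could look like the sketch below; GDB itself does
   not use this function.  */

static ATTRIBUTE_UNUSED void
example_checked_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr,
			      ssize_t len)
{
  if (!may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, %s bytes)"),
	   core_addr_to_string_nz (memaddr), plongest (len));

  /* Permission granted; hand the write to the target stack.  */
  target_write_memory (memaddr, myaddr, len);
}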
4032
4033 void
4034 _initialize_target ()
4035 {
4036 the_debug_target = new debug_target ();
4037
4038 add_info ("target", info_target_command, targ_desc);
4039 add_info ("files", info_target_command, targ_desc);
4040
4041 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4042 Set target debugging."), _("\
4043 Show target debugging."), _("\
4044 When non-zero, target debugging is enabled. Higher numbers are more\n\
4045 verbose."),
4046 set_targetdebug,
4047 show_targetdebug,
4048 &setdebuglist, &showdebuglist);
4049
4050 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4051 &trust_readonly, _("\
4052 Set mode for reading from readonly sections."), _("\
4053 Show mode for reading from readonly sections."), _("\
4054 When this mode is on, memory reads from readonly sections (such as .text)\n\
4055 will be read from the object file instead of from the target. This will\n\
4056 result in significant performance improvement for remote targets."),
4057 NULL,
4058 show_trust_readonly,
4059 &setlist, &showlist);
4060
4061 add_com ("monitor", class_obscure, do_monitor_command,
4062 _("Send a command to the remote monitor (remote targets only)."));
4063
4064 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4065 _("Print the name of each layer of the internal target stack."),
4066 &maintenanceprintlist);
4067
4068 add_setshow_boolean_cmd ("target-async", no_class,
4069 &target_async_permitted_1, _("\
4070 Set whether gdb controls the inferior in asynchronous mode."), _("\
4071 Show whether gdb controls the inferior in asynchronous mode."), _("\
4072 Tells gdb whether to control the inferior in asynchronous mode."),
4073 maint_set_target_async_command,
4074 maint_show_target_async_command,
4075 &maintenance_set_cmdlist,
4076 &maintenance_show_cmdlist);
4077
4078 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4079 &target_non_stop_enabled_1, _("\
4080 Set whether gdb always controls the inferior in non-stop mode."), _("\
4081 Show whether gdb always controls the inferior in non-stop mode."), _("\
4082 Tells gdb whether to always control the inferior in non-stop mode."),
4083 maint_set_target_non_stop_command,
4084 maint_show_target_non_stop_command,
4085 &maintenance_set_cmdlist,
4086 &maintenance_show_cmdlist);
4087
4088 add_setshow_boolean_cmd ("may-write-registers", class_support,
4089 &may_write_registers_1, _("\
4090 Set permission to write into registers."), _("\
4091 Show permission to write into registers."), _("\
4092 When this permission is on, GDB may write into the target's registers.\n\
4093 Otherwise, any sort of write attempt will result in an error."),
4094 set_target_permissions, NULL,
4095 &setlist, &showlist);
4096
4097 add_setshow_boolean_cmd ("may-write-memory", class_support,
4098 &may_write_memory_1, _("\
4099 Set permission to write into target memory."), _("\
4100 Show permission to write into target memory."), _("\
4101 When this permission is on, GDB may write into the target's memory.\n\
4102 Otherwise, any sort of write attempt will result in an error."),
4103 set_write_memory_permission, NULL,
4104 &setlist, &showlist);
4105
4106 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4107 &may_insert_breakpoints_1, _("\
4108 Set permission to insert breakpoints in the target."), _("\
4109 Show permission to insert breakpoints in the target."), _("\
4110 When this permission is on, GDB may insert breakpoints in the program.\n\
4111 Otherwise, any sort of insertion attempt will result in an error."),
4112 set_target_permissions, NULL,
4113 &setlist, &showlist);
4114
4115 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4116 &may_insert_tracepoints_1, _("\
4117 Set permission to insert tracepoints in the target."), _("\
4118 Show permission to insert tracepoints in the target."), _("\
4119 When this permission is on, GDB may insert tracepoints in the program.\n\
4120 Otherwise, any sort of insertion attempt will result in an error."),
4121 set_target_permissions, NULL,
4122 &setlist, &showlist);
4123
4124 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4125 &may_insert_fast_tracepoints_1, _("\
4126 Set permission to insert fast tracepoints in the target."), _("\
4127 Show permission to insert fast tracepoints in the target."), _("\
4128 When this permission is on, GDB may insert fast tracepoints.\n\
4129 Otherwise, any sort of insertion attempt will result in an error."),
4130 set_target_permissions, NULL,
4131 &setlist, &showlist);
4132
4133 add_setshow_boolean_cmd ("may-interrupt", class_support,
4134 &may_stop_1, _("\
4135 Set permission to interrupt or signal the target."), _("\
4136 Show permission to interrupt or signal the target."), _("\
4137 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4138 Otherwise, any attempt to interrupt or stop will be ignored."),
4139 set_target_permissions, NULL,
4140 &setlist, &showlist);
4141
4142 add_com ("flash-erase", no_class, flash_erase_command,
4143 _("Erase all flash memory regions."));
4144
4145 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4146 &auto_connect_native_target, _("\
4147 Set whether GDB may automatically connect to the native target."), _("\
4148 Show whether GDB may automatically connect to the native target."), _("\
4149 When on, and GDB is not connected to a target yet, GDB\n\
4150 attempts \"run\" and other commands with the native target."),
4151 NULL, show_auto_connect_native_target,
4152 &setlist, &showlist);
4153 }
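
/* For reference, an illustrative CLI session exercising a few of the
   commands registered above (output is approximate and trimmed):

     (gdb) maint show target-non-stop
     Whether the target is always in non-stop mode is auto (currently off).
     (gdb) set may-write-memory off
     (gdb) set var some_global = 1
     Writing to memory is not allowed ...
     (gdb) set debug target 1
     (gdb) info target

   "set debug target" comes from the add_setshow_zuinteger_cmd call above,
   and "info target" / "info files" both invoke info_target_command.  */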