gdb/target.c (binutils-gdb.git, as of commit "gdb: bool-ify follow_fork")
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2020 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "terminal.h"
51 #include <unordered_map>
52 #include "target-connection.h"
53
54 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
55
56 static void default_terminal_info (struct target_ops *, const char *, int);
57
58 static int default_watchpoint_addr_within_range (struct target_ops *,
59 CORE_ADDR, CORE_ADDR, int);
60
61 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
62 CORE_ADDR, int);
63
64 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
65
66 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
67 long lwp, long tid);
68
69 static void default_mourn_inferior (struct target_ops *self);
70
71 static int default_search_memory (struct target_ops *ops,
72 CORE_ADDR start_addr,
73 ULONGEST search_space_len,
74 const gdb_byte *pattern,
75 ULONGEST pattern_len,
76 CORE_ADDR *found_addrp);
77
78 static int default_verify_memory (struct target_ops *self,
79 const gdb_byte *data,
80 CORE_ADDR memaddr, ULONGEST size);
81
82 static void tcomplain (void) ATTRIBUTE_NORETURN;
83
84 static struct target_ops *find_default_run_target (const char *);
85
86 static int dummy_find_memory_regions (struct target_ops *self,
87 find_memory_region_ftype ignore1,
88 void *ignore2);
89
90 static char *dummy_make_corefile_notes (struct target_ops *self,
91 bfd *ignore1, int *ignore2);
92
93 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
94
95 static enum exec_direction_kind default_execution_direction
96 (struct target_ops *self);
97
98 /* Mapping between target_info objects (which have address identity)
99 and corresponding open/factory function/callback. Each add_target
100 call adds one entry to this map, and registers a "target
101 TARGET_NAME" command that when invoked calls the factory registered
102 here. The target_info object is associated with the command via
103 the command's context. */
104 static std::unordered_map<const target_info *, target_open_ftype *>
105 target_factories;
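/* Illustrative sketch (editorial addition, not part of target.c): how a
   target backend would typically add an entry to target_factories via
   add_target, assuming the usual target_info layout (shortname, longname,
   doc).  The "example" target name, open function and initializer below
   are invented for illustration only.  */
#if 0
static const target_info example_target_info = {
  "example",				/* creates the "target example" command */
  "Hypothetical example target",
  "Connect to the hypothetical example target.\n\
Usage: target example [ARGS]"
};

static void
example_target_open (const char *args, int from_tty)
{
  /* Parse ARGS, construct the backend's target_ops and push it.  */
}

void
_initialize_example_target ()
{
  add_target (example_target_info, example_target_open, NULL);
}
#endif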
106
107 /* The singleton debug target. */
108
109 static struct target_ops *the_debug_target;
110
111 /* Top of target stack. */
112 /* The target structure we are currently using to talk to a process
113 or file or whatever "inferior" we have. */
114
115 target_ops *
116 current_top_target ()
117 {
118 return current_inferior ()->top_target ();
119 }
120
121 /* Command list for target. */
122
123 static struct cmd_list_element *targetlist = NULL;
124
125 /* True if we should trust readonly sections from the
126 executable when reading memory. */
127
128 static bool trust_readonly = false;
129
130 /* Nonzero if we should show the true memory contents, including
131 any memory breakpoints inserted by GDB. */
132
133 static int show_memory_breakpoints = 0;
134
135 /* These globals control whether GDB attempts to perform these
136 operations; they are useful for targets that need to prevent
137 inadvertent disruption, such as in non-stop mode. */
138
139 bool may_write_registers = true;
140
141 bool may_write_memory = true;
142
143 bool may_insert_breakpoints = true;
144
145 bool may_insert_tracepoints = true;
146
147 bool may_insert_fast_tracepoints = true;
148
149 bool may_stop = true;
150
151 /* Non-zero if we want to see a trace of target-level operations. */
152
153 static unsigned int targetdebug = 0;
154
155 static void
156 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
157 {
158 if (targetdebug)
159 push_target (the_debug_target);
160 else
161 unpush_target (the_debug_target);
162 }
163
164 static void
165 show_targetdebug (struct ui_file *file, int from_tty,
166 struct cmd_list_element *c, const char *value)
167 {
168 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
169 }
170
171 /* The user just typed 'target' without the name of a target. */
172
173 static void
174 target_command (const char *arg, int from_tty)
175 {
176 fputs_filtered ("Argument required (target name). Try `help target'\n",
177 gdb_stdout);
178 }
179
180 int
181 target_has_all_memory_1 (void)
182 {
183 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
184 if (t->has_all_memory ())
185 return 1;
186
187 return 0;
188 }
189
190 int
191 target_has_memory_1 (void)
192 {
193 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
194 if (t->has_memory ())
195 return 1;
196
197 return 0;
198 }
199
200 int
201 target_has_stack_1 (void)
202 {
203 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
204 if (t->has_stack ())
205 return 1;
206
207 return 0;
208 }
209
210 int
211 target_has_registers_1 (void)
212 {
213 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
214 if (t->has_registers ())
215 return 1;
216
217 return 0;
218 }
219
220 bool
221 target_has_execution_1 (inferior *inf)
222 {
223 for (target_ops *t = inf->top_target ();
224 t != nullptr;
225 t = inf->find_target_beneath (t))
226 if (t->has_execution (inf))
227 return true;
228
229 return false;
230 }
231
232 int
233 target_has_execution_current (void)
234 {
235 return target_has_execution_1 (current_inferior ());
236 }
237
238 /* This is used to implement the various target commands. */
239
240 static void
241 open_target (const char *args, int from_tty, struct cmd_list_element *command)
242 {
243 auto *ti = static_cast<target_info *> (get_cmd_context (command));
244 target_open_ftype *func = target_factories[ti];
245
246 if (targetdebug)
247 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
248 ti->shortname);
249
250 func (args, from_tty);
251
252 if (targetdebug)
253 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
254 ti->shortname, args, from_tty);
255 }
256
257 /* See target.h. */
258
259 void
260 add_target (const target_info &t, target_open_ftype *func,
261 completer_ftype *completer)
262 {
263 struct cmd_list_element *c;
264
265 auto &func_slot = target_factories[&t];
266 if (func_slot != nullptr)
267 internal_error (__FILE__, __LINE__,
268 _("target already added (\"%s\")."), t.shortname);
269 func_slot = func;
270
271 if (targetlist == NULL)
272 add_prefix_cmd ("target", class_run, target_command, _("\
273 Connect to a target machine or process.\n\
274 The first argument is the type or protocol of the target machine.\n\
275 Remaining arguments are interpreted by the target protocol. For more\n\
276 information on the arguments for a particular protocol, type\n\
277 `help target ' followed by the protocol name."),
278 &targetlist, "target ", 0, &cmdlist);
279 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
280 set_cmd_context (c, (void *) &t);
281 set_cmd_sfunc (c, open_target);
282 if (completer != NULL)
283 set_cmd_completer (c, completer);
284 }
285
286 /* See target.h. */
287
288 void
289 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
290 {
291 struct cmd_list_element *c;
292 char *alt;
293
294 /* If we use add_alias_cmd here, we do not get the deprecated warning;
295 see PR cli/15104. */
296 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
297 set_cmd_sfunc (c, open_target);
298 set_cmd_context (c, (void *) &tinfo);
299 alt = xstrprintf ("target %s", tinfo.shortname);
300 deprecate_cmd (c, alt);
301 }
302
303 /* Stub functions */
304
305 void
306 target_kill (void)
307 {
308 current_top_target ()->kill ();
309 }
310
311 void
312 target_load (const char *arg, int from_tty)
313 {
314 target_dcache_invalidate ();
315 current_top_target ()->load (arg, from_tty);
316 }
317
318 /* Define it. */
319
320 target_terminal_state target_terminal::m_terminal_state
321 = target_terminal_state::is_ours;
322
323 /* See target/target.h. */
324
325 void
326 target_terminal::init (void)
327 {
328 current_top_target ()->terminal_init ();
329
330 m_terminal_state = target_terminal_state::is_ours;
331 }
332
333 /* See target/target.h. */
334
335 void
336 target_terminal::inferior (void)
337 {
338 struct ui *ui = current_ui;
339
340 /* A background resume (``run&'') should leave GDB in control of the
341 terminal. */
342 if (ui->prompt_state != PROMPT_BLOCKED)
343 return;
344
345 /* Since we always run the inferior in the main console (unless "set
346 inferior-tty" is in effect), when some UI other than the main one
347 calls target_terminal::inferior, we leave the main UI's
348 terminal settings as is. */
349 if (ui != main_ui)
350 return;
351
352 /* If GDB is resuming the inferior in the foreground, install
353 inferior's terminal modes. */
354
355 struct inferior *inf = current_inferior ();
356
357 if (inf->terminal_state != target_terminal_state::is_inferior)
358 {
359 current_top_target ()->terminal_inferior ();
360 inf->terminal_state = target_terminal_state::is_inferior;
361 }
362
363 m_terminal_state = target_terminal_state::is_inferior;
364
365 /* If the user hit C-c before, pretend that it was hit right
366 here. */
367 if (check_quit_flag ())
368 target_pass_ctrlc ();
369 }
370
371 /* See target/target.h. */
372
373 void
374 target_terminal::restore_inferior (void)
375 {
376 struct ui *ui = current_ui;
377
378 /* See target_terminal::inferior(). */
379 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
380 return;
381
382 /* Restore the terminal settings of inferiors that were in the
383 foreground but are now ours_for_output due to a temporary
384 target_terminal::ours_for_output() call. */
385
386 {
387 scoped_restore_current_inferior restore_inferior;
388
389 for (::inferior *inf : all_inferiors ())
390 {
391 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
392 {
393 set_current_inferior (inf);
394 current_top_target ()->terminal_inferior ();
395 inf->terminal_state = target_terminal_state::is_inferior;
396 }
397 }
398 }
399
400 m_terminal_state = target_terminal_state::is_inferior;
401
402 /* If the user hit C-c before, pretend that it was hit right
403 here. */
404 if (check_quit_flag ())
405 target_pass_ctrlc ();
406 }
407
408 /* Switch terminal state to DESIRED_STATE, either is_ours, or
409 is_ours_for_output. */
410
411 static void
412 target_terminal_is_ours_kind (target_terminal_state desired_state)
413 {
414 scoped_restore_current_inferior restore_inferior;
415
416 /* Must do this in two passes. First, have all inferiors save the
417 current terminal settings. Then, after all inferiors have had a
418 chance to safely save the terminal settings, restore GDB's
419 terminal settings. */
420
421 for (inferior *inf : all_inferiors ())
422 {
423 if (inf->terminal_state == target_terminal_state::is_inferior)
424 {
425 set_current_inferior (inf);
426 current_top_target ()->terminal_save_inferior ();
427 }
428 }
429
430 for (inferior *inf : all_inferiors ())
431 {
432 /* Note we don't check is_inferior here like above because we
433 need to handle 'is_ours_for_output -> is_ours' too. Careful
434 to never transition from 'is_ours' to 'is_ours_for_output',
435 though. */
436 if (inf->terminal_state != target_terminal_state::is_ours
437 && inf->terminal_state != desired_state)
438 {
439 set_current_inferior (inf);
440 if (desired_state == target_terminal_state::is_ours)
441 current_top_target ()->terminal_ours ();
442 else if (desired_state == target_terminal_state::is_ours_for_output)
443 current_top_target ()->terminal_ours_for_output ();
444 else
445 gdb_assert_not_reached ("unhandled desired state");
446 inf->terminal_state = desired_state;
447 }
448 }
449 }
450
451 /* See target/target.h. */
452
453 void
454 target_terminal::ours ()
455 {
456 struct ui *ui = current_ui;
457
458 /* See target_terminal::inferior. */
459 if (ui != main_ui)
460 return;
461
462 if (m_terminal_state == target_terminal_state::is_ours)
463 return;
464
465 target_terminal_is_ours_kind (target_terminal_state::is_ours);
466 m_terminal_state = target_terminal_state::is_ours;
467 }
468
469 /* See target/target.h. */
470
471 void
472 target_terminal::ours_for_output ()
473 {
474 struct ui *ui = current_ui;
475
476 /* See target_terminal::inferior. */
477 if (ui != main_ui)
478 return;
479
480 if (!target_terminal::is_inferior ())
481 return;
482
483 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
484 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
485 }
486
487 /* See target/target.h. */
488
489 void
490 target_terminal::info (const char *arg, int from_tty)
491 {
492 current_top_target ()->terminal_info (arg, from_tty);
493 }
494
495 /* See target.h. */
496
497 bool
498 target_supports_terminal_ours (void)
499 {
500 /* The current top target is the target at the top of the target
501 stack of the current inferior. While normally there's always an
502 inferior, we must check for nullptr here because we can get here
503 very early during startup, before the initial inferior is first
504 created. */
505 inferior *inf = current_inferior ();
506
507 if (inf == nullptr)
508 return false;
509 return inf->top_target ()->supports_terminal_ours ();
510 }
511
512 static void
513 tcomplain (void)
514 {
515 error (_("You can't do that when your target is `%s'"),
516 current_top_target ()->shortname ());
517 }
518
519 void
520 noprocess (void)
521 {
522 error (_("You can't do that without a process to debug."));
523 }
524
525 static void
526 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
527 {
528 printf_unfiltered (_("No saved terminal information.\n"));
529 }
530
531 /* A default implementation for the to_get_ada_task_ptid target method.
532
533 This function builds the PTID using LWP and TID as its lwp and tid
534 elements. The pid used is the pid of the
535 inferior_ptid. */
536
537 static ptid_t
538 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
539 {
540 return ptid_t (inferior_ptid.pid (), lwp, tid);
541 }
542
543 static enum exec_direction_kind
544 default_execution_direction (struct target_ops *self)
545 {
546 if (!target_can_execute_reverse)
547 return EXEC_FORWARD;
548 else if (!target_can_async_p ())
549 return EXEC_FORWARD;
550 else
551 gdb_assert_not_reached ("\
552 to_execution_direction must be implemented for reverse async");
553 }
554
555 /* See target.h. */
556
557 void
558 decref_target (target_ops *t)
559 {
560 t->decref ();
561 if (t->refcount () == 0)
562 {
563 if (t->stratum () == process_stratum)
564 connection_list_remove (as_process_stratum_target (t));
565 target_close (t);
566 }
567 }
568
569 /* See target.h. */
570
571 void
572 target_stack::push (target_ops *t)
573 {
574 t->incref ();
575
576 strata stratum = t->stratum ();
577
578 if (stratum == process_stratum)
579 connection_list_add (as_process_stratum_target (t));
580
581 /* If there's already a target at this stratum, remove it. */
582
583 if (m_stack[stratum] != NULL)
584 unpush (m_stack[stratum]);
585
586 /* Now add the new one. */
587 m_stack[stratum] = t;
588
589 if (m_top < stratum)
590 m_top = stratum;
591 }
592
593 /* See target.h. */
594
595 void
596 push_target (struct target_ops *t)
597 {
598 current_inferior ()->push_target (t);
599 }
600
601 /* See target.h. */
602
603 void
604 push_target (target_ops_up &&t)
605 {
606 current_inferior ()->push_target (t.get ());
607 t.release ();
608 }
609
610 /* See target.h. */
611
612 int
613 unpush_target (struct target_ops *t)
614 {
615 return current_inferior ()->unpush_target (t);
616 }
617
618 /* See target.h. */
619
620 bool
621 target_stack::unpush (target_ops *t)
622 {
623 gdb_assert (t != NULL);
624
625 strata stratum = t->stratum ();
626
627 if (stratum == dummy_stratum)
628 internal_error (__FILE__, __LINE__,
629 _("Attempt to unpush the dummy target"));
630
631 /* Look for the specified target. Note that a target can only occur
632 once in the target stack. */
633
634 if (m_stack[stratum] != t)
635 {
636 /* If T wasn't pushed, quit. Only open targets should be
637 closed. */
638 return false;
639 }
640
641 /* Unchain the target. */
642 m_stack[stratum] = NULL;
643
644 if (m_top == stratum)
645 m_top = t->beneath ()->stratum ();
646
647 /* Finally close the target, if there are no inferiors
648 referencing this target still. Note we do this after unchaining,
649 so any target method calls from within the target_close
650 implementation don't end up in T anymore. Do leave the target
651 open if there are other inferiors referencing this target
652 still. */
653 decref_target (t);
654
655 return true;
656 }
657
658 /* Unpush TARGET and assert that it worked. */
659
660 static void
661 unpush_target_and_assert (struct target_ops *target)
662 {
663 if (!unpush_target (target))
664 {
665 fprintf_unfiltered (gdb_stderr,
666 "pop_all_targets couldn't find target %s\n",
667 target->shortname ());
668 internal_error (__FILE__, __LINE__,
669 _("failed internal consistency check"));
670 }
671 }
672
673 void
674 pop_all_targets_above (enum strata above_stratum)
675 {
676 while ((int) (current_top_target ()->stratum ()) > (int) above_stratum)
677 unpush_target_and_assert (current_top_target ());
678 }
679
680 /* See target.h. */
681
682 void
683 pop_all_targets_at_and_above (enum strata stratum)
684 {
685 while ((int) (current_top_target ()->stratum ()) >= (int) stratum)
686 unpush_target_and_assert (current_top_target ());
687 }
688
689 void
690 pop_all_targets (void)
691 {
692 pop_all_targets_above (dummy_stratum);
693 }
694
695 /* Return true if T is now pushed in the current inferior's target
696 stack. Return false otherwise. */
697
698 bool
699 target_is_pushed (target_ops *t)
700 {
701 return current_inferior ()->target_is_pushed (t);
702 }
703
704 /* Default implementation of to_get_thread_local_address. */
705
706 static void
707 generic_tls_error (void)
708 {
709 throw_error (TLS_GENERIC_ERROR,
710 _("Cannot find thread-local variables on this target"));
711 }
712
713 /* Using the objfile specified in OBJFILE, find the address for the
714 current thread's thread-local storage with offset OFFSET. */
715 CORE_ADDR
716 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
717 {
718 volatile CORE_ADDR addr = 0;
719 struct target_ops *target = current_top_target ();
720 struct gdbarch *gdbarch = target_gdbarch ();
721
722 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
723 {
724 ptid_t ptid = inferior_ptid;
725
726 try
727 {
728 CORE_ADDR lm_addr;
729
730 /* Fetch the load module address for this objfile. */
731 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
732 objfile);
733
734 if (gdbarch_get_thread_local_address_p (gdbarch))
735 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
736 offset);
737 else
738 addr = target->get_thread_local_address (ptid, lm_addr, offset);
739 }
740 /* If an error occurred, print TLS related messages here. Otherwise,
741 throw the error to some higher catcher. */
742 catch (const gdb_exception &ex)
743 {
744 int objfile_is_library = (objfile->flags & OBJF_SHARED);
745
746 switch (ex.error)
747 {
748 case TLS_NO_LIBRARY_SUPPORT_ERROR:
749 error (_("Cannot find thread-local variables "
750 "in this thread library."));
751 break;
752 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
753 if (objfile_is_library)
754 error (_("Cannot find shared library `%s' in dynamic"
755 " linker's load module list"), objfile_name (objfile));
756 else
757 error (_("Cannot find executable file `%s' in dynamic"
758 " linker's load module list"), objfile_name (objfile));
759 break;
760 case TLS_NOT_ALLOCATED_YET_ERROR:
761 if (objfile_is_library)
762 error (_("The inferior has not yet allocated storage for"
763 " thread-local variables in\n"
764 "the shared library `%s'\n"
765 "for %s"),
766 objfile_name (objfile),
767 target_pid_to_str (ptid).c_str ());
768 else
769 error (_("The inferior has not yet allocated storage for"
770 " thread-local variables in\n"
771 "the executable `%s'\n"
772 "for %s"),
773 objfile_name (objfile),
774 target_pid_to_str (ptid).c_str ());
775 break;
776 case TLS_GENERIC_ERROR:
777 if (objfile_is_library)
778 error (_("Cannot find thread-local storage for %s, "
779 "shared library %s:\n%s"),
780 target_pid_to_str (ptid).c_str (),
781 objfile_name (objfile), ex.what ());
782 else
783 error (_("Cannot find thread-local storage for %s, "
784 "executable file %s:\n%s"),
785 target_pid_to_str (ptid).c_str (),
786 objfile_name (objfile), ex.what ());
787 break;
788 default:
789 throw;
790 break;
791 }
792 }
793 }
794 else
795 error (_("Cannot find thread-local variables on this target"));
796
797 return addr;
798 }
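/* Illustrative sketch (editorial addition): using the routine above to read
   the current thread's copy of a thread-local variable.  TLS_OBJFILE and
   TLS_OFFSET are assumed to come from symbol lookup; the helper name is
   invented for illustration.  */
#if 0
static uint32_t
example_read_tls_uint32 (struct objfile *tls_objfile, CORE_ADDR tls_offset)
{
  CORE_ADDR addr = target_translate_tls_address (tls_objfile, tls_offset);
  uint32_t value = 0;

  if (target_read_uint32 (addr, &value) != 0)
    memory_error (TARGET_XFER_E_IO, addr);
  return value;
}
#endif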
799
800 const char *
801 target_xfer_status_to_string (enum target_xfer_status status)
802 {
803 #define CASE(X) case X: return #X
804 switch (status)
805 {
806 CASE(TARGET_XFER_E_IO);
807 CASE(TARGET_XFER_UNAVAILABLE);
808 default:
809 return "<unknown>";
810 }
811 #undef CASE
812 };
813
814
815 #undef MIN
816 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
817
818 /* target_read_string -- read a null terminated string, up to LEN bytes,
819 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
820 Set *STRING to hold the data in malloc'd memory; ownership passes to
821 the unique pointer. Return the number of bytes successfully
822 read. */
823
824 int
825 target_read_string (CORE_ADDR memaddr, gdb::unique_xmalloc_ptr<char> *string,
826 int len, int *errnop)
827 {
828 int tlen, offset, i;
829 gdb_byte buf[4];
830 int errcode = 0;
831 char *buffer;
832 int buffer_allocated;
833 char *bufptr;
834 unsigned int nbytes_read = 0;
835
836 gdb_assert (string);
837
838 /* Small for testing. */
839 buffer_allocated = 4;
840 buffer = (char *) xmalloc (buffer_allocated);
841 bufptr = buffer;
842
843 while (len > 0)
844 {
845 tlen = MIN (len, 4 - (memaddr & 3));
846 offset = memaddr & 3;
847
848 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
849 if (errcode != 0)
850 {
851 /* The transfer request might have crossed the boundary to an
852 unallocated region of memory. Retry the transfer, requesting
853 a single byte. */
854 tlen = 1;
855 offset = 0;
856 errcode = target_read_memory (memaddr, buf, 1);
857 if (errcode != 0)
858 goto done;
859 }
860
861 if (bufptr - buffer + tlen > buffer_allocated)
862 {
863 unsigned int bytes;
864
865 bytes = bufptr - buffer;
866 buffer_allocated *= 2;
867 buffer = (char *) xrealloc (buffer, buffer_allocated);
868 bufptr = buffer + bytes;
869 }
870
871 for (i = 0; i < tlen; i++)
872 {
873 *bufptr++ = buf[i + offset];
874 if (buf[i + offset] == '\000')
875 {
876 nbytes_read += i + 1;
877 goto done;
878 }
879 }
880
881 memaddr += tlen;
882 len -= tlen;
883 nbytes_read += tlen;
884 }
885 done:
886 string->reset (buffer);
887 if (errnop != NULL)
888 *errnop = errcode;
889 return nbytes_read;
890 }
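/* Illustrative sketch (editorial addition): fetching a NUL-terminated string
   from the inferior with target_read_string, per the contract documented
   above.  MEMADDR is assumed to point at a C string that fits in 200 bytes;
   the helper name is invented.  */
#if 0
static void
example_print_target_string (CORE_ADDR memaddr)
{
  gdb::unique_xmalloc_ptr<char> str;
  int err;
  int nbytes = target_read_string (memaddr, &str, 200, &err);

  if (err != 0)
    printf_unfiltered ("read stopped after %d bytes (error %d)\n",
		       nbytes, err);
  else
    printf_unfiltered ("string at %s: \"%s\"\n",
		       core_addr_to_string_nz (memaddr), str.get ());
}
#endif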
891
892 struct target_section_table *
893 target_get_section_table (struct target_ops *target)
894 {
895 return target->get_section_table ();
896 }
897
898 /* Find a section containing ADDR. */
899
900 struct target_section *
901 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
902 {
903 struct target_section_table *table = target_get_section_table (target);
904 struct target_section *secp;
905
906 if (table == NULL)
907 return NULL;
908
909 for (secp = table->sections; secp < table->sections_end; secp++)
910 {
911 if (addr >= secp->addr && addr < secp->endaddr)
912 return secp;
913 }
914 return NULL;
915 }
916
917
918 /* Helper for the memory xfer routines. Checks the attributes of the
919 memory region of MEMADDR against the read or write being attempted.
920 If the access is permitted, return true; otherwise return false.
921 REGION_P is an optional output parameter. If not-NULL, it is
922 filled with a pointer to the memory region of MEMADDR. REG_LEN
923 returns LEN trimmed to the end of the region. This is how much the
924 caller can continue requesting, if the access is permitted. A
925 single xfer request must not straddle memory region boundaries. */
926
927 static int
928 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
929 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
930 struct mem_region **region_p)
931 {
932 struct mem_region *region;
933
934 region = lookup_mem_region (memaddr);
935
936 if (region_p != NULL)
937 *region_p = region;
938
939 switch (region->attrib.mode)
940 {
941 case MEM_RO:
942 if (writebuf != NULL)
943 return 0;
944 break;
945
946 case MEM_WO:
947 if (readbuf != NULL)
948 return 0;
949 break;
950
951 case MEM_FLASH:
952 /* We only support writing to flash during "load" for now. */
953 if (writebuf != NULL)
954 error (_("Writing to flash memory forbidden in this context"));
955 break;
956
957 case MEM_NONE:
958 return 0;
959 }
960
961 /* region->hi == 0 means there's no upper bound. */
962 if (memaddr + len < region->hi || region->hi == 0)
963 *reg_len = len;
964 else
965 *reg_len = region->hi - memaddr;
966
967 return 1;
968 }
969
970 /* Read memory from more than one valid target. A core file, for
971 instance, could have some of memory but delegate other bits to
972 the target below it. So, we must manually try all targets. */
973
974 enum target_xfer_status
975 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
976 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
977 ULONGEST *xfered_len)
978 {
979 enum target_xfer_status res;
980
981 do
982 {
983 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
984 readbuf, writebuf, memaddr, len,
985 xfered_len);
986 if (res == TARGET_XFER_OK)
987 break;
988
989 /* Stop if the target reports that the memory is not available. */
990 if (res == TARGET_XFER_UNAVAILABLE)
991 break;
992
993 /* We want to continue past core files to executables, but not
994 past a running target's memory. */
995 if (ops->has_all_memory ())
996 break;
997
998 ops = ops->beneath ();
999 }
1000 while (ops != NULL);
1001
1002 /* The cache works at the raw memory level. Make sure the cache
1003 gets updated with raw contents no matter what kind of memory
1004 object was originally being written. Note we do write-through
1005 first, so that if it fails, we don't write to the cache contents
1006 that never made it to the target. */
1007 if (writebuf != NULL
1008 && inferior_ptid != null_ptid
1009 && target_dcache_init_p ()
1010 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1011 {
1012 DCACHE *dcache = target_dcache_get ();
1013
1014 /* Note that writing to an area of memory which wasn't present
1015 in the cache doesn't cause it to be loaded in. */
1016 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1017 }
1018
1019 return res;
1020 }
1021
1022 /* Perform a partial memory transfer.
1023 For docs see target.h, to_xfer_partial. */
1024
1025 static enum target_xfer_status
1026 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1027 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1028 ULONGEST len, ULONGEST *xfered_len)
1029 {
1030 enum target_xfer_status res;
1031 ULONGEST reg_len;
1032 struct mem_region *region;
1033 struct inferior *inf;
1034
1035 /* For accesses to unmapped overlay sections, read directly from
1036 files. Must do this first, as MEMADDR may need adjustment. */
1037 if (readbuf != NULL && overlay_debugging)
1038 {
1039 struct obj_section *section = find_pc_overlay (memaddr);
1040
1041 if (pc_in_unmapped_range (memaddr, section))
1042 {
1043 struct target_section_table *table
1044 = target_get_section_table (ops);
1045 const char *section_name = section->the_bfd_section->name;
1046
1047 memaddr = overlay_mapped_address (memaddr, section);
1048 return section_table_xfer_memory_partial (readbuf, writebuf,
1049 memaddr, len, xfered_len,
1050 table->sections,
1051 table->sections_end,
1052 section_name);
1053 }
1054 }
1055
1056 /* Try the executable files, if "trust-readonly-sections" is set. */
1057 if (readbuf != NULL && trust_readonly)
1058 {
1059 struct target_section *secp;
1060 struct target_section_table *table;
1061
1062 secp = target_section_by_addr (ops, memaddr);
1063 if (secp != NULL
1064 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1065 {
1066 table = target_get_section_table (ops);
1067 return section_table_xfer_memory_partial (readbuf, writebuf,
1068 memaddr, len, xfered_len,
1069 table->sections,
1070 table->sections_end,
1071 NULL);
1072 }
1073 }
1074
1075 /* Try GDB's internal data cache. */
1076
1077 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1078 &region))
1079 return TARGET_XFER_E_IO;
1080
1081 if (inferior_ptid != null_ptid)
1082 inf = current_inferior ();
1083 else
1084 inf = NULL;
1085
1086 if (inf != NULL
1087 && readbuf != NULL
1088 /* The dcache reads whole cache lines; that doesn't play well
1089 with reading from a trace buffer, because reading outside of
1090 the collected memory range fails. */
1091 && get_traceframe_number () == -1
1092 && (region->attrib.cache
1093 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1094 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1095 {
1096 DCACHE *dcache = target_dcache_get_or_init ();
1097
1098 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1099 reg_len, xfered_len);
1100 }
1101
1102 /* If none of those methods found the memory we wanted, fall back
1103 to a target partial transfer. Normally a single call to
1104 to_xfer_partial is enough; if it doesn't recognize an object
1105 it will call the to_xfer_partial of the next target down.
1106 But for memory this won't do. Memory is the only target
1107 object which can be read from more than one valid target.
1108 A core file, for instance, could have some of memory but
1109 delegate other bits to the target below it. So, we must
1110 manually try all targets. */
1111
1112 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1113 xfered_len);
1114
1115 /* If we still haven't got anything, return the last error. We
1116 give up. */
1117 return res;
1118 }
1119
1120 /* Perform a partial memory transfer. For docs see target.h,
1121 to_xfer_partial. */
1122
1123 static enum target_xfer_status
1124 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1125 gdb_byte *readbuf, const gdb_byte *writebuf,
1126 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1127 {
1128 enum target_xfer_status res;
1129
1130 /* Zero length requests are ok and require no work. */
1131 if (len == 0)
1132 return TARGET_XFER_EOF;
1133
1134 memaddr = address_significant (target_gdbarch (), memaddr);
1135
1136 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1137 breakpoint insns, thus hiding out from higher layers whether
1138 there are software breakpoints inserted in the code stream. */
1139 if (readbuf != NULL)
1140 {
1141 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1142 xfered_len);
1143
1144 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1145 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1146 }
1147 else
1148 {
1149 /* A large write request is likely to be partially satisfied
1150 by memory_xfer_partial_1. We will continually malloc
1151 and free a copy of the entire write request for breakpoint
1152 shadow handling even though we only end up writing a small
1153 subset of it. Cap writes to a limit specified by the target
1154 to mitigate this. */
1155 len = std::min (ops->get_memory_xfer_limit (), len);
1156
1157 gdb::byte_vector buf (writebuf, writebuf + len);
1158 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1159 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1160 xfered_len);
1161 }
1162
1163 return res;
1164 }
1165
1166 scoped_restore_tmpl<int>
1167 make_scoped_restore_show_memory_breakpoints (int show)
1168 {
1169 return make_scoped_restore (&show_memory_breakpoints, show);
1170 }
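/* Illustrative sketch (editorial addition): temporarily exposing the
   breakpoint instructions actually present in memory, with the previous
   setting restored when the scoped_restore goes out of scope.  The helper
   name is invented for illustration.  */
#if 0
static void
example_read_raw_insn (CORE_ADDR pc, gdb_byte *insn, size_t insn_len)
{
  /* While this object is live, reads return the breakpoint instructions
     written into memory instead of the shadowed original contents.  */
  scoped_restore_tmpl<int> restore_show
    = make_scoped_restore_show_memory_breakpoints (1);

  if (target_read_memory (pc, insn, insn_len) != 0)
    memory_error (TARGET_XFER_E_IO, pc);
}
#endif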
1171
1172 /* For docs see target.h, to_xfer_partial. */
1173
1174 enum target_xfer_status
1175 target_xfer_partial (struct target_ops *ops,
1176 enum target_object object, const char *annex,
1177 gdb_byte *readbuf, const gdb_byte *writebuf,
1178 ULONGEST offset, ULONGEST len,
1179 ULONGEST *xfered_len)
1180 {
1181 enum target_xfer_status retval;
1182
1183 /* Transfer is done when LEN is zero. */
1184 if (len == 0)
1185 return TARGET_XFER_EOF;
1186
1187 if (writebuf && !may_write_memory)
1188 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1189 core_addr_to_string_nz (offset), plongest (len));
1190
1191 *xfered_len = 0;
1192
1193 /* If this is a memory transfer, let the memory-specific code
1194 have a look at it instead. Memory transfers are more
1195 complicated. */
1196 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1197 || object == TARGET_OBJECT_CODE_MEMORY)
1198 retval = memory_xfer_partial (ops, object, readbuf,
1199 writebuf, offset, len, xfered_len);
1200 else if (object == TARGET_OBJECT_RAW_MEMORY)
1201 {
1202 /* Skip/avoid accessing the target if the memory region
1203 attributes block the access. Check this here instead of in
1204 raw_memory_xfer_partial as otherwise we'd end up checking
1205 this twice in the case where the memory_xfer_partial path is
1206 taken; once before checking the dcache, and again in the
1207 tail call to raw_memory_xfer_partial.
1208 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1209 NULL))
1210 return TARGET_XFER_E_IO;
1211
1212 /* Request the normal memory object from other layers. */
1213 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1214 xfered_len);
1215 }
1216 else
1217 retval = ops->xfer_partial (object, annex, readbuf,
1218 writebuf, offset, len, xfered_len);
1219
1220 if (targetdebug)
1221 {
1222 const unsigned char *myaddr = NULL;
1223
1224 fprintf_unfiltered (gdb_stdlog,
1225 "%s:target_xfer_partial "
1226 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1227 ops->shortname (),
1228 (int) object,
1229 (annex ? annex : "(null)"),
1230 host_address_to_string (readbuf),
1231 host_address_to_string (writebuf),
1232 core_addr_to_string_nz (offset),
1233 pulongest (len), retval,
1234 pulongest (*xfered_len));
1235
1236 if (readbuf)
1237 myaddr = readbuf;
1238 if (writebuf)
1239 myaddr = writebuf;
1240 if (retval == TARGET_XFER_OK && myaddr != NULL)
1241 {
1242 int i;
1243
1244 fputs_unfiltered (", bytes =", gdb_stdlog);
1245 for (i = 0; i < *xfered_len; i++)
1246 {
1247 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1248 {
1249 if (targetdebug < 2 && i > 0)
1250 {
1251 fprintf_unfiltered (gdb_stdlog, " ...");
1252 break;
1253 }
1254 fprintf_unfiltered (gdb_stdlog, "\n");
1255 }
1256
1257 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1258 }
1259 }
1260
1261 fputc_unfiltered ('\n', gdb_stdlog);
1262 }
1263
1264 /* Check implementations of to_xfer_partial update *XFERED_LEN
1265 properly. Do assertion after printing debug messages, so that we
1266 can find more clues on assertion failure from debugging messages. */
1267 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1268 gdb_assert (*xfered_len > 0);
1269
1270 return retval;
1271 }
1272
1273 /* Read LEN bytes of target memory at address MEMADDR, placing the
1274 results in GDB's memory at MYADDR. Returns either 0 for success or
1275 -1 if any error occurs.
1276
1277 If an error occurs, no guarantee is made about the contents of the data at
1278 MYADDR. In particular, the caller should not depend upon partial reads
1279 filling the buffer with good data. There is no way for the caller to know
1280 how much good data might have been transferred anyway. Callers that can
1281 deal with partial reads should call target_read (which will retry until
1282 it makes no progress, and then return how much was transferred). */
1283
1284 int
1285 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1286 {
1287 if (target_read (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1288 myaddr, memaddr, len) == len)
1289 return 0;
1290 else
1291 return -1;
1292 }
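/* Illustrative sketch (editorial addition): the all-or-nothing contract
   documented above -- a caller that cannot handle partial reads only looks
   at the 0/-1 result.  The helper name is invented.  */
#if 0
static void
example_read_word (CORE_ADDR memaddr)
{
  gdb_byte buf[8];

  if (target_read_memory (memaddr, buf, sizeof buf) != 0)
    printf_unfiltered ("cannot read memory at %s\n",
		       core_addr_to_string_nz (memaddr));
  else
    {
      /* All of BUF is valid here; partial results are never exposed.  */
    }
}
#endif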
1293
1294 /* See target/target.h. */
1295
1296 int
1297 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1298 {
1299 gdb_byte buf[4];
1300 int r;
1301
1302 r = target_read_memory (memaddr, buf, sizeof buf);
1303 if (r != 0)
1304 return r;
1305 *result = extract_unsigned_integer (buf, sizeof buf,
1306 gdbarch_byte_order (target_gdbarch ()));
1307 return 0;
1308 }
1309
1310 /* Like target_read_memory, but specify explicitly that this is a read
1311 from the target's raw memory. That is, this read bypasses the
1312 dcache, breakpoint shadowing, etc. */
1313
1314 int
1315 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1316 {
1317 if (target_read (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1318 myaddr, memaddr, len) == len)
1319 return 0;
1320 else
1321 return -1;
1322 }
1323
1324 /* Like target_read_memory, but specify explicitly that this is a read from
1325 the target's stack. This may trigger different cache behavior. */
1326
1327 int
1328 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1329 {
1330 if (target_read (current_top_target (), TARGET_OBJECT_STACK_MEMORY, NULL,
1331 myaddr, memaddr, len) == len)
1332 return 0;
1333 else
1334 return -1;
1335 }
1336
1337 /* Like target_read_memory, but specify explicitly that this is a read from
1338 the target's code. This may trigger different cache behavior. */
1339
1340 int
1341 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1342 {
1343 if (target_read (current_top_target (), TARGET_OBJECT_CODE_MEMORY, NULL,
1344 myaddr, memaddr, len) == len)
1345 return 0;
1346 else
1347 return -1;
1348 }
1349
1350 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1351 Returns either 0 for success or -1 if any error occurs. If an
1352 error occurs, no guarantee is made about how much data got written.
1353 Callers that can deal with partial writes should call
1354 target_write. */
1355
1356 int
1357 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1358 {
1359 if (target_write (current_top_target (), TARGET_OBJECT_MEMORY, NULL,
1360 myaddr, memaddr, len) == len)
1361 return 0;
1362 else
1363 return -1;
1364 }
1365
1366 /* Write LEN bytes from MYADDR to target raw memory at address
1367 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1368 If an error occurs, no guarantee is made about how much data got
1369 written. Callers that can deal with partial writes should call
1370 target_write. */
1371
1372 int
1373 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1374 {
1375 if (target_write (current_top_target (), TARGET_OBJECT_RAW_MEMORY, NULL,
1376 myaddr, memaddr, len) == len)
1377 return 0;
1378 else
1379 return -1;
1380 }
1381
1382 /* Fetch the target's memory map. */
1383
1384 std::vector<mem_region>
1385 target_memory_map (void)
1386 {
1387 std::vector<mem_region> result = current_top_target ()->memory_map ();
1388 if (result.empty ())
1389 return result;
1390
1391 std::sort (result.begin (), result.end ());
1392
1393 /* Check that regions do not overlap. Simultaneously assign
1394 a numbering for the "mem" commands to use to refer to
1395 each region. */
1396 mem_region *last_one = NULL;
1397 for (size_t ix = 0; ix < result.size (); ix++)
1398 {
1399 mem_region *this_one = &result[ix];
1400 this_one->number = ix;
1401
1402 if (last_one != NULL && last_one->hi > this_one->lo)
1403 {
1404 warning (_("Overlapping regions in memory map: ignoring"));
1405 return std::vector<mem_region> ();
1406 }
1407
1408 last_one = this_one;
1409 }
1410
1411 return result;
1412 }
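/* Illustrative sketch (editorial addition): walking the sorted, numbered
   region list returned above.  The lo/hi/attrib fields follow the
   mem_region usage elsewhere in this file; the helper name is invented.  */
#if 0
static void
example_dump_memory_map ()
{
  for (const mem_region &r : target_memory_map ())
    printf_unfiltered ("region [%s, %s)%s\n",
		       core_addr_to_string_nz (r.lo),
		       core_addr_to_string_nz (r.hi),
		       r.attrib.mode == MEM_RO ? " (read-only)" : "");
}
#endif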
1413
1414 void
1415 target_flash_erase (ULONGEST address, LONGEST length)
1416 {
1417 current_top_target ()->flash_erase (address, length);
1418 }
1419
1420 void
1421 target_flash_done (void)
1422 {
1423 current_top_target ()->flash_done ();
1424 }
1425
1426 static void
1427 show_trust_readonly (struct ui_file *file, int from_tty,
1428 struct cmd_list_element *c, const char *value)
1429 {
1430 fprintf_filtered (file,
1431 _("Mode for reading from readonly sections is %s.\n"),
1432 value);
1433 }
1434
1435 /* Target vector read/write partial wrapper functions. */
1436
1437 static enum target_xfer_status
1438 target_read_partial (struct target_ops *ops,
1439 enum target_object object,
1440 const char *annex, gdb_byte *buf,
1441 ULONGEST offset, ULONGEST len,
1442 ULONGEST *xfered_len)
1443 {
1444 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1445 xfered_len);
1446 }
1447
1448 static enum target_xfer_status
1449 target_write_partial (struct target_ops *ops,
1450 enum target_object object,
1451 const char *annex, const gdb_byte *buf,
1452 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1453 {
1454 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1455 xfered_len);
1456 }
1457
1458 /* Wrappers to perform the full transfer. */
1459
1460 /* For docs on target_read see target.h. */
1461
1462 LONGEST
1463 target_read (struct target_ops *ops,
1464 enum target_object object,
1465 const char *annex, gdb_byte *buf,
1466 ULONGEST offset, LONGEST len)
1467 {
1468 LONGEST xfered_total = 0;
1469 int unit_size = 1;
1470
1471 /* If we are reading from a memory object, find the length of an addressable
1472 unit for that architecture. */
1473 if (object == TARGET_OBJECT_MEMORY
1474 || object == TARGET_OBJECT_STACK_MEMORY
1475 || object == TARGET_OBJECT_CODE_MEMORY
1476 || object == TARGET_OBJECT_RAW_MEMORY)
1477 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1478
1479 while (xfered_total < len)
1480 {
1481 ULONGEST xfered_partial;
1482 enum target_xfer_status status;
1483
1484 status = target_read_partial (ops, object, annex,
1485 buf + xfered_total * unit_size,
1486 offset + xfered_total, len - xfered_total,
1487 &xfered_partial);
1488
1489 /* Call an observer, notifying them of the xfer progress? */
1490 if (status == TARGET_XFER_EOF)
1491 return xfered_total;
1492 else if (status == TARGET_XFER_OK)
1493 {
1494 xfered_total += xfered_partial;
1495 QUIT;
1496 }
1497 else
1498 return TARGET_XFER_E_IO;
1499
1500 }
1501 return len;
1502 }
1503
1504 /* Assuming that the entire [begin, end) range of memory cannot be
1505 read, try to read whatever subrange is possible to read.
1506
1507 The function returns, in RESULT, either zero or one memory block.
1508 If there's a readable subrange at the beginning, it is completely
1509 read and returned. Any further readable subrange will not be read.
1510 Otherwise, if there's a readable subrange at the end, it will be
1511 completely read and returned. Any readable subranges before it
1512 (obviously, not starting at the beginning) will be ignored. In
1513 other cases -- either no readable subrange, or readable subrange(s)
1514 that are neither at the beginning nor the end -- nothing is returned.
1515
1516 The purpose of this function is to handle a read across a boundary
1517 of accessible memory when a memory map is not available.
1518 The above restrictions are fine for this case, but will give
1519 incorrect results if the memory is 'patchy'. However, supporting
1520 'patchy' memory would require trying to read every single byte,
1521 which seems an unacceptable solution. An explicit memory map is
1522 recommended for that case -- and read_memory_robust will then
1523 take care of reading multiple ranges.
1524
1525 static void
1526 read_whatever_is_readable (struct target_ops *ops,
1527 const ULONGEST begin, const ULONGEST end,
1528 int unit_size,
1529 std::vector<memory_read_result> *result)
1530 {
1531 ULONGEST current_begin = begin;
1532 ULONGEST current_end = end;
1533 int forward;
1534 ULONGEST xfered_len;
1535
1536 /* If we previously failed to read 1 byte, nothing can be done here. */
1537 if (end - begin <= 1)
1538 return;
1539
1540 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
1541
1542 /* Check that either first or the last byte is readable, and give up
1543 if not. This heuristic is meant to permit reading accessible memory
1544 at the boundary of an accessible region. */
1545 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1546 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
1547 {
1548 forward = 1;
1549 ++current_begin;
1550 }
1551 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1552 buf.get () + (end - begin) - 1, end - 1, 1,
1553 &xfered_len) == TARGET_XFER_OK)
1554 {
1555 forward = 0;
1556 --current_end;
1557 }
1558 else
1559 return;
1560
1561 /* Loop invariant is that the [current_begin, current_end) was previously
1562 found to be not readable as a whole.
1563
1564 Note loop condition -- if the range has 1 byte, we can't divide the range
1565 so there's no point trying further. */
1566 while (current_end - current_begin > 1)
1567 {
1568 ULONGEST first_half_begin, first_half_end;
1569 ULONGEST second_half_begin, second_half_end;
1570 LONGEST xfer;
1571 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
1572
1573 if (forward)
1574 {
1575 first_half_begin = current_begin;
1576 first_half_end = middle;
1577 second_half_begin = middle;
1578 second_half_end = current_end;
1579 }
1580 else
1581 {
1582 first_half_begin = middle;
1583 first_half_end = current_end;
1584 second_half_begin = current_begin;
1585 second_half_end = middle;
1586 }
1587
1588 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1589 buf.get () + (first_half_begin - begin) * unit_size,
1590 first_half_begin,
1591 first_half_end - first_half_begin);
1592
1593 if (xfer == first_half_end - first_half_begin)
1594 {
1595 /* This half reads up fine. So, the error must be in the
1596 other half. */
1597 current_begin = second_half_begin;
1598 current_end = second_half_end;
1599 }
1600 else
1601 {
1602 /* This half is not readable. Because we've tried one byte, we
1603 know some part of this half is actually readable. Go to the next
1604 iteration to divide again and try to read.
1605
1606 We don't handle the other half, because this function only tries
1607 to read a single readable subrange. */
1608 current_begin = first_half_begin;
1609 current_end = first_half_end;
1610 }
1611 }
1612
1613 if (forward)
1614 {
1615 /* The [begin, current_begin) range has been read. */
1616 result->emplace_back (begin, current_end, std::move (buf));
1617 }
1618 else
1619 {
1620 /* The [current_end, end) range has been read. */
1621 LONGEST region_len = end - current_end;
1622
1623 gdb::unique_xmalloc_ptr<gdb_byte> data
1624 ((gdb_byte *) xmalloc (region_len * unit_size));
1625 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
1626 region_len * unit_size);
1627 result->emplace_back (current_end, end, std::move (data));
1628 }
1629 }
1630
1631 std::vector<memory_read_result>
1632 read_memory_robust (struct target_ops *ops,
1633 const ULONGEST offset, const LONGEST len)
1634 {
1635 std::vector<memory_read_result> result;
1636 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1637
1638 LONGEST xfered_total = 0;
1639 while (xfered_total < len)
1640 {
1641 struct mem_region *region = lookup_mem_region (offset + xfered_total);
1642 LONGEST region_len;
1643
1644 /* If there is no explicit region, a fake one should be created. */
1645 gdb_assert (region);
1646
1647 if (region->hi == 0)
1648 region_len = len - xfered_total;
1649 else
1650 region_len = region->hi - offset;
1651
1652 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1653 {
1654 /* Cannot read this region. Note that we can end up here only
1655 if the region is explicitly marked inaccessible, or
1656 'inaccessible-by-default' is in effect. */
1657 xfered_total += region_len;
1658 }
1659 else
1660 {
1661 LONGEST to_read = std::min (len - xfered_total, region_len);
1662 gdb::unique_xmalloc_ptr<gdb_byte> buffer
1663 ((gdb_byte *) xmalloc (to_read * unit_size));
1664
1665 LONGEST xfered_partial =
1666 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
1667 offset + xfered_total, to_read);
1668 /* Call an observer, notifying them of the xfer progress? */
1669 if (xfered_partial <= 0)
1670 {
1671 /* Got an error reading the full chunk. See if maybe we can read
1672 some subrange. */
1673 read_whatever_is_readable (ops, offset + xfered_total,
1674 offset + xfered_total + to_read,
1675 unit_size, &result);
1676 xfered_total += to_read;
1677 }
1678 else
1679 {
1680 result.emplace_back (offset + xfered_total,
1681 offset + xfered_total + xfered_partial,
1682 std::move (buffer));
1683 xfered_total += xfered_partial;
1684 }
1685 QUIT;
1686 }
1687 }
1688
1689 return result;
1690 }
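/* Illustrative sketch (editorial addition): consuming the per-range results
   of read_memory_robust, e.g. for dumping whatever is readable across
   inaccessible holes.  The begin/end member names are assumed from the
   emplace_back calls above; the helper name is invented.  */
#if 0
static void
example_dump_readable (struct target_ops *ops, ULONGEST offset, LONGEST len)
{
  std::vector<memory_read_result> ranges
    = read_memory_robust (ops, offset, len);

  for (const memory_read_result &r : ranges)
    printf_unfiltered ("readable: [%s, %s), %s bytes\n",
		       pulongest (r.begin), pulongest (r.end),
		       pulongest (r.end - r.begin));
}
#endif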
1691
1692
1693 /* An alternative to target_write with progress callbacks. */
1694
1695 LONGEST
1696 target_write_with_progress (struct target_ops *ops,
1697 enum target_object object,
1698 const char *annex, const gdb_byte *buf,
1699 ULONGEST offset, LONGEST len,
1700 void (*progress) (ULONGEST, void *), void *baton)
1701 {
1702 LONGEST xfered_total = 0;
1703 int unit_size = 1;
1704
1705 /* If we are writing to a memory object, find the length of an addressable
1706 unit for that architecture. */
1707 if (object == TARGET_OBJECT_MEMORY
1708 || object == TARGET_OBJECT_STACK_MEMORY
1709 || object == TARGET_OBJECT_CODE_MEMORY
1710 || object == TARGET_OBJECT_RAW_MEMORY)
1711 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
1712
1713 /* Give the progress callback a chance to set up. */
1714 if (progress)
1715 (*progress) (0, baton);
1716
1717 while (xfered_total < len)
1718 {
1719 ULONGEST xfered_partial;
1720 enum target_xfer_status status;
1721
1722 status = target_write_partial (ops, object, annex,
1723 buf + xfered_total * unit_size,
1724 offset + xfered_total, len - xfered_total,
1725 &xfered_partial);
1726
1727 if (status != TARGET_XFER_OK)
1728 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
1729
1730 if (progress)
1731 (*progress) (xfered_partial, baton);
1732
1733 xfered_total += xfered_partial;
1734 QUIT;
1735 }
1736 return len;
1737 }
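/* Illustrative sketch (editorial addition): supplying a progress callback,
   as "load"-style commands do.  The callback is called once with 0 before
   the transfer starts and then with the size of each completed partial
   write; the names below are invented for illustration.  */
#if 0
static void
example_write_progress (ULONGEST bytes, void *baton)
{
  ULONGEST *total = (ULONGEST *) baton;

  *total += bytes;
  printf_unfiltered ("wrote %s bytes so far\n", pulongest (*total));
}

static LONGEST
example_write_memory_with_progress (const gdb_byte *buf, ULONGEST addr,
				    LONGEST len)
{
  ULONGEST total = 0;

  return target_write_with_progress (current_top_target (),
				     TARGET_OBJECT_MEMORY, NULL, buf,
				     addr, len,
				     example_write_progress, &total);
}
#endif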
1738
1739 /* For docs on target_write see target.h. */
1740
1741 LONGEST
1742 target_write (struct target_ops *ops,
1743 enum target_object object,
1744 const char *annex, const gdb_byte *buf,
1745 ULONGEST offset, LONGEST len)
1746 {
1747 return target_write_with_progress (ops, object, annex, buf, offset, len,
1748 NULL, NULL);
1749 }
1750
1751 /* Help for target_read_alloc and target_read_stralloc. See their comments
1752 for details. */
1753
1754 template <typename T>
1755 gdb::optional<gdb::def_vector<T>>
1756 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1757 const char *annex)
1758 {
1759 gdb::def_vector<T> buf;
1760 size_t buf_pos = 0;
1761 const int chunk = 4096;
1762
1763 /* This function does not have a length parameter; it reads the
1764 entire OBJECT. Also, it doesn't support objects fetched partly
1765 from one target and partly from another (in a different stratum,
1766 e.g. a core file and an executable). Both reasons make it
1767 unsuitable for reading memory. */
1768 gdb_assert (object != TARGET_OBJECT_MEMORY);
1769
1770 /* Start by reading up to 4K at a time. The target will throttle
1771 this number down if necessary. */
1772 while (1)
1773 {
1774 ULONGEST xfered_len;
1775 enum target_xfer_status status;
1776
1777 buf.resize (buf_pos + chunk);
1778
1779 status = target_read_partial (ops, object, annex,
1780 (gdb_byte *) &buf[buf_pos],
1781 buf_pos, chunk,
1782 &xfered_len);
1783
1784 if (status == TARGET_XFER_EOF)
1785 {
1786 /* Read all there was. */
1787 buf.resize (buf_pos);
1788 return buf;
1789 }
1790 else if (status != TARGET_XFER_OK)
1791 {
1792 /* An error occurred. */
1793 return {};
1794 }
1795
1796 buf_pos += xfered_len;
1797
1798 QUIT;
1799 }
1800 }
1801
1802 /* See target.h. */
1803
1804 gdb::optional<gdb::byte_vector>
1805 target_read_alloc (struct target_ops *ops, enum target_object object,
1806 const char *annex)
1807 {
1808 return target_read_alloc_1<gdb_byte> (ops, object, annex);
1809 }
1810
1811 /* See target.h. */
1812
1813 gdb::optional<gdb::char_vector>
1814 target_read_stralloc (struct target_ops *ops, enum target_object object,
1815 const char *annex)
1816 {
1817 gdb::optional<gdb::char_vector> buf
1818 = target_read_alloc_1<char> (ops, object, annex);
1819
1820 if (!buf)
1821 return {};
1822
1823 if (buf->empty () || buf->back () != '\0')
1824 buf->push_back ('\0');
1825
1826 /* Check for embedded NUL bytes; but allow trailing NULs. */
1827 for (auto it = std::find (buf->begin (), buf->end (), '\0');
1828 it != buf->end (); it++)
1829 if (*it != '\0')
1830 {
1831 warning (_("target object %d, annex %s, "
1832 "contained unexpected null characters"),
1833 (int) object, annex ? annex : "(none)");
1834 break;
1835 }
1836
1837 return buf;
1838 }
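/* Illustrative sketch (editorial addition): reading an entire string-valued
   target object, e.g. an XML library list the way the solib code consumes
   TARGET_OBJECT_LIBRARIES.  The object/annex pair is only an example; the
   helper name is invented.  */
#if 0
static void
example_read_libraries_xml ()
{
  gdb::optional<gdb::char_vector> xml
    = target_read_stralloc (current_top_target (),
			    TARGET_OBJECT_LIBRARIES, NULL);

  if (xml && !xml->empty ())
    printf_unfiltered ("library list XML:\n%s\n", xml->data ());
}
#endif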
1839
1840 /* Memory transfer methods. */
1841
1842 void
1843 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1844 LONGEST len)
1845 {
1846 /* This method is used to read from an alternate, non-current
1847 target. This read must bypass the overlay support (as symbols
1848 don't match this target), and GDB's internal cache (wrong cache
1849 for this target). */
1850 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1851 != len)
1852 memory_error (TARGET_XFER_E_IO, addr);
1853 }
1854
1855 ULONGEST
1856 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1857 int len, enum bfd_endian byte_order)
1858 {
1859 gdb_byte buf[sizeof (ULONGEST)];
1860
1861 gdb_assert (len <= sizeof (buf));
1862 get_target_memory (ops, addr, buf, len);
1863 return extract_unsigned_integer (buf, len, byte_order);
1864 }
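/* Illustrative sketch (editorial addition): reading a raw 4-byte unsigned
   value from an alternate target, bypassing overlay handling and GDB's
   caches as described above.  The helper name is invented.  */
#if 0
static ULONGEST
example_peek_u32 (struct target_ops *ops, CORE_ADDR addr)
{
  return get_target_memory_unsigned (ops, addr, 4,
				     gdbarch_byte_order (target_gdbarch ()));
}
#endif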
1865
1866 /* See target.h. */
1867
1868 int
1869 target_insert_breakpoint (struct gdbarch *gdbarch,
1870 struct bp_target_info *bp_tgt)
1871 {
1872 if (!may_insert_breakpoints)
1873 {
1874 warning (_("May not insert breakpoints"));
1875 return 1;
1876 }
1877
1878 return current_top_target ()->insert_breakpoint (gdbarch, bp_tgt);
1879 }
1880
1881 /* See target.h. */
1882
1883 int
1884 target_remove_breakpoint (struct gdbarch *gdbarch,
1885 struct bp_target_info *bp_tgt,
1886 enum remove_bp_reason reason)
1887 {
1888 /* This is kind of a weird case to handle, but the permission might
1889 have been changed after breakpoints were inserted - in which case
1890 we should just take the user literally and assume that any
1891 breakpoints should be left in place. */
1892 if (!may_insert_breakpoints)
1893 {
1894 warning (_("May not remove breakpoints"));
1895 return 1;
1896 }
1897
1898 return current_top_target ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1899 }
1900
1901 static void
1902 info_target_command (const char *args, int from_tty)
1903 {
1904 int has_all_mem = 0;
1905
1906 if (symfile_objfile != NULL)
1907 printf_unfiltered (_("Symbols from \"%s\".\n"),
1908 objfile_name (symfile_objfile));
1909
1910 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
1911 {
1912 if (!t->has_memory ())
1913 continue;
1914
1915 if ((int) (t->stratum ()) <= (int) dummy_stratum)
1916 continue;
1917 if (has_all_mem)
1918 printf_unfiltered (_("\tWhile running this, "
1919 "GDB does not access memory from...\n"));
1920 printf_unfiltered ("%s:\n", t->longname ());
1921 t->files_info ();
1922 has_all_mem = t->has_all_memory ();
1923 }
1924 }
1925
1926 /* This function is called before any new inferior is created, e.g.
1927 by running a program, attaching, or connecting to a target.
1928 It cleans up any state from previous invocations which might
1929 change between runs. This is a subset of what target_preopen
1930 resets (things which might change between targets). */
1931
1932 void
1933 target_pre_inferior (int from_tty)
1934 {
1935 /* Clear out solib state. Otherwise the solib state of the previous
1936 inferior might have survived and is entirely wrong for the new
1937 target. This has been observed on GNU/Linux using glibc 2.3. How
1938 to reproduce:
1939
1940 bash$ ./foo&
1941 [1] 4711
1942 bash$ ./foo&
1943 [2] 4712
1944 bash$ gdb ./foo
1945 [...]
1946 (gdb) attach 4711
1947 (gdb) detach
1948 (gdb) attach 4712
1949 Cannot access memory at address 0xdeadbeef
1950 */
1951
1952 /* In some OSs, the shared library list is the same/global/shared
1953 across inferiors. If code is shared between processes, so are
1954 memory regions and features. */
1955 if (!gdbarch_has_global_solist (target_gdbarch ()))
1956 {
1957 no_shared_libraries (NULL, from_tty);
1958
1959 invalidate_target_mem_regions ();
1960
1961 target_clear_description ();
1962 }
1963
1964 /* attach_flag may be set if the previous process associated with
1965 the inferior was attached to. */
1966 current_inferior ()->attach_flag = 0;
1967
1968 current_inferior ()->highest_thread_num = 0;
1969
1970 agent_capability_invalidate ();
1971 }
1972
1973 /* This is to be called by the open routine before it does
1974 anything. */
1975
1976 void
1977 target_preopen (int from_tty)
1978 {
1979 dont_repeat ();
1980
1981 if (current_inferior ()->pid != 0)
1982 {
1983 if (!from_tty
1984 || !target_has_execution
1985 || query (_("A program is being debugged already. Kill it? ")))
1986 {
1987 /* Core inferiors actually should be detached, not
1988 killed. */
1989 if (target_has_execution)
1990 target_kill ();
1991 else
1992 target_detach (current_inferior (), 0);
1993 }
1994 else
1995 error (_("Program not killed."));
1996 }
1997
1998 /* Calling target_kill may remove the target from the stack. But if
1999 it doesn't (which seems like a win for UDI), remove it now. */
2000 /* Leave the exec target, though. The user may be switching from a
2001 live process to a core of the same program. */
2002 pop_all_targets_above (file_stratum);
2003
2004 target_pre_inferior (from_tty);
2005 }
2006
2007 /* See target.h. */
2008
2009 void
2010 target_detach (inferior *inf, int from_tty)
2011 {
2012 /* After we have detached, we will clear the register cache for this inferior
2013 by calling registers_changed_ptid. We must save the pid_ptid before
2014 detaching, as the target detach method will clear inf->pid. */
2015 ptid_t save_pid_ptid = ptid_t (inf->pid);
2016
2017 /* As long as some to_detach implementations rely on current_inferior
2018 (either directly, or indirectly, e.g. through target_gdbarch or by
2019 reading memory), INF needs to be the current inferior. Once that
2020 requirement no longer holds, we can remove this
2021 assertion. */
2022 gdb_assert (inf == current_inferior ());
2023
2024 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2025 /* Don't remove global breakpoints here. They're removed on
2026 disconnection from the target. */
2027 ;
2028 else
2029 /* If we're in breakpoints-always-inserted mode, have to remove
2030 breakpoints before detaching. */
2031 remove_breakpoints_inf (current_inferior ());
2032
2033 prepare_for_detach ();
2034
2035 /* Hold a strong reference because detaching may unpush the
2036 target. */
2037 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2038
2039 current_top_target ()->detach (inf, from_tty);
2040
2041 process_stratum_target *proc_target
2042 = as_process_stratum_target (proc_target_ref.get ());
2043
2044 registers_changed_ptid (proc_target, save_pid_ptid);
2045
2046 /* We have to ensure we have no frame cache left. Normally,
2047 registers_changed_ptid (proc_target, save_pid_ptid) calls reinit_frame_cache when
2048 inferior_ptid matches save_pid_ptid, but in our case, it does not
2049 call it, as inferior_ptid has been reset. */
2050 reinit_frame_cache ();
2051 }
2052
2053 void
2054 target_disconnect (const char *args, int from_tty)
2055 {
2056 /* If we're in breakpoints-always-inserted mode or if breakpoints
2057 are global across processes, we have to remove them before
2058 disconnecting. */
2059 remove_breakpoints ();
2060
2061 current_top_target ()->disconnect (args, from_tty);
2062 }
2063
2064 /* See target/target.h. */
2065
2066 ptid_t
2067 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2068 {
2069 return current_top_target ()->wait (ptid, status, options);
2070 }
2071
2072 /* See target.h. */
2073
2074 ptid_t
2075 default_target_wait (struct target_ops *ops,
2076 ptid_t ptid, struct target_waitstatus *status,
2077 int options)
2078 {
2079 status->kind = TARGET_WAITKIND_IGNORE;
2080 return minus_one_ptid;
2081 }
2082
2083 std::string
2084 target_pid_to_str (ptid_t ptid)
2085 {
2086 return current_top_target ()->pid_to_str (ptid);
2087 }
2088
2089 const char *
2090 target_thread_name (struct thread_info *info)
2091 {
2092 gdb_assert (info->inf == current_inferior ());
2093
2094 return current_top_target ()->thread_name (info);
2095 }
2096
2097 struct thread_info *
2098 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2099 int handle_len,
2100 struct inferior *inf)
2101 {
2102 return current_top_target ()->thread_handle_to_thread_info (thread_handle,
2103 handle_len, inf);
2104 }
2105
2106 /* See target.h. */
2107
2108 gdb::byte_vector
2109 target_thread_info_to_thread_handle (struct thread_info *tip)
2110 {
2111 return current_top_target ()->thread_info_to_thread_handle (tip);
2112 }
2113
2114 void
2115 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2116 {
2117 process_stratum_target *curr_target = current_inferior ()->process_target ();
2118
2119 target_dcache_invalidate ();
2120
2121 current_top_target ()->resume (ptid, step, signal);
2122
2123 registers_changed_ptid (curr_target, ptid);
2124 /* We only set the internal executing state here. The user/frontend
2125 running state is set at a higher level. This also clears the
2126 thread's stop_pc as side effect. */
2127 set_executing (curr_target, ptid, true);
2128 clear_inline_frame_state (curr_target, ptid);
2129 }
2130
2131 /* If true, target_commit_resume is a nop. */
2132 static int defer_target_commit_resume;
2133
2134 /* See target.h. */
2135
2136 void
2137 target_commit_resume (void)
2138 {
2139 if (defer_target_commit_resume)
2140 return;
2141
2142 current_top_target ()->commit_resume ();
2143 }
2144
2145 /* See target.h. */
2146
2147 scoped_restore_tmpl<int>
2148 make_scoped_defer_target_commit_resume ()
2149 {
2150 return make_scoped_restore (&defer_target_commit_resume, 1);
2151 }
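
/* Usage sketch (illustrative only), mirroring how infrun batches
   resumptions: while the scoped restore is live, target_commit_resume
   is a nop, so several target_resume calls can be issued back to back;
   once it goes out of scope, one explicit commit flushes everything to
   the target.

     {
       scoped_restore defer_commit
         = make_scoped_defer_target_commit_resume ();
       target_resume (ptid, 0, GDB_SIGNAL_0);
     }
     target_commit_resume ();
*/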
2152
2153 void
2154 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2155 {
2156 current_top_target ()->pass_signals (pass_signals);
2157 }
2158
2159 void
2160 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2161 {
2162 current_top_target ()->program_signals (program_signals);
2163 }
2164
2165 static bool
2166 default_follow_fork (struct target_ops *self, bool follow_child,
2167 bool detach_fork)
2168 {
2169 /* Some target returned a fork event, but did not know how to follow it. */
2170 internal_error (__FILE__, __LINE__,
2171 _("could not find a target to follow fork"));
2172 }
2173
2174 /* Target wrapper for the follow-fork hook; delegates to the
2175 current top target. */
2176
2177 bool
2178 target_follow_fork (bool follow_child, bool detach_fork)
2179 {
2180 return current_top_target ()->follow_fork (follow_child, detach_fork);
2181 }
2182
2183 /* Target wrapper for follow exec hook. */
2184
2185 void
2186 target_follow_exec (struct inferior *inf, const char *execd_pathname)
2187 {
2188 current_top_target ()->follow_exec (inf, execd_pathname);
2189 }
2190
2191 static void
2192 default_mourn_inferior (struct target_ops *self)
2193 {
2194 internal_error (__FILE__, __LINE__,
2195 _("could not find a target to follow mourn inferior"));
2196 }
2197
2198 void
2199 target_mourn_inferior (ptid_t ptid)
2200 {
2201 gdb_assert (ptid == inferior_ptid);
2202 current_top_target ()->mourn_inferior ();
2203
2204 /* We no longer need to keep handles on any of the object files.
2205 Make sure to release them to avoid unnecessarily locking any
2206 of them while we're not actually debugging. */
2207 bfd_cache_close_all ();
2208 }
2209
2210 /* Look for a target which can describe architectural features, starting
2211 from TARGET. If we find one, return its description. */
2212
2213 const struct target_desc *
2214 target_read_description (struct target_ops *target)
2215 {
2216 return target->read_description ();
2217 }
2218
2219 /* This implements a basic search of memory, reading target memory and
2220 performing the search here (as opposed to performing the search on the
2221 target side with, for example, gdbserver). */
2222
2223 int
2224 simple_search_memory (struct target_ops *ops,
2225 CORE_ADDR start_addr, ULONGEST search_space_len,
2226 const gdb_byte *pattern, ULONGEST pattern_len,
2227 CORE_ADDR *found_addrp)
2228 {
2229 /* NOTE: also defined in find.c testcase. */
2230 #define SEARCH_CHUNK_SIZE 16000
2231 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2232 /* Buffer to hold memory contents for searching. */
2233 unsigned search_buf_size;
2234
2235 search_buf_size = chunk_size + pattern_len - 1;
2236
2237 /* No point in trying to allocate a buffer larger than the search space. */
2238 if (search_space_len < search_buf_size)
2239 search_buf_size = search_space_len;
2240
2241 gdb::byte_vector search_buf (search_buf_size);
2242
2243 /* Prime the search buffer. */
2244
2245 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2246 search_buf.data (), start_addr, search_buf_size)
2247 != search_buf_size)
2248 {
2249 warning (_("Unable to access %s bytes of target "
2250 "memory at %s, halting search."),
2251 pulongest (search_buf_size), hex_string (start_addr));
2252 return -1;
2253 }
2254
2255 /* Perform the search.
2256
2257 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2258 When we've scanned N bytes we copy the trailing bytes to the start and
2259 read in another N bytes. */
2260
2261 while (search_space_len >= pattern_len)
2262 {
2263 gdb_byte *found_ptr;
2264 unsigned nr_search_bytes
2265 = std::min (search_space_len, (ULONGEST) search_buf_size);
2266
2267 found_ptr = (gdb_byte *) memmem (search_buf.data (), nr_search_bytes,
2268 pattern, pattern_len);
2269
2270 if (found_ptr != NULL)
2271 {
2272 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf.data ());
2273
2274 *found_addrp = found_addr;
2275 return 1;
2276 }
2277
2278 /* Not found in this chunk, skip to next chunk. */
2279
2280 /* Don't let search_space_len wrap here, it's unsigned. */
2281 if (search_space_len >= chunk_size)
2282 search_space_len -= chunk_size;
2283 else
2284 search_space_len = 0;
2285
2286 if (search_space_len >= pattern_len)
2287 {
2288 unsigned keep_len = search_buf_size - chunk_size;
2289 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2290 int nr_to_read;
2291
2292 /* Copy the trailing part of the previous iteration to the front
2293 of the buffer for the next iteration. */
2294 gdb_assert (keep_len == pattern_len - 1);
2295 memcpy (&search_buf[0], &search_buf[chunk_size], keep_len);
2296
2297 nr_to_read = std::min (search_space_len - keep_len,
2298 (ULONGEST) chunk_size);
2299
2300 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2301 &search_buf[keep_len], read_addr,
2302 nr_to_read) != nr_to_read)
2303 {
2304 warning (_("Unable to access %s bytes of target "
2305 "memory at %s, halting search."),
2306 plongest (nr_to_read),
2307 hex_string (read_addr));
2308 return -1;
2309 }
2310
2311 start_addr += chunk_size;
2312 }
2313 }
2314
2315 /* Not found. */
2316
2317 return 0;
2318 }
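
/* Worked example of the chunking in simple_search_memory (illustrative):
   with SEARCH_CHUNK_SIZE of 16000 and a 4-byte pattern, search_buf_size
   is 16003.  Each pass scans at most 16003 bytes; before the next read,
   the final 3 bytes (pattern_len - 1) are copied to the front of the
   buffer, so a match that straddles a 16000-byte chunk boundary is
   still found.  */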
2319
2320 /* Default implementation of memory-searching. */
2321
2322 static int
2323 default_search_memory (struct target_ops *self,
2324 CORE_ADDR start_addr, ULONGEST search_space_len,
2325 const gdb_byte *pattern, ULONGEST pattern_len,
2326 CORE_ADDR *found_addrp)
2327 {
2328 /* Start over from the top of the target stack. */
2329 return simple_search_memory (current_top_target (),
2330 start_addr, search_space_len,
2331 pattern, pattern_len, found_addrp);
2332 }
2333
2334 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2335 sequence of bytes in PATTERN with length PATTERN_LEN.
2336
2337 The result is 1 if found, 0 if not found, and -1 if there was an error
2338 requiring halting of the search (e.g. memory read error).
2339 If the pattern is found the address is recorded in FOUND_ADDRP. */
2340
2341 int
2342 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2343 const gdb_byte *pattern, ULONGEST pattern_len,
2344 CORE_ADDR *found_addrp)
2345 {
2346 return current_top_target ()->search_memory (start_addr, search_space_len,
2347 pattern, pattern_len, found_addrp);
2348 }
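
/* Usage sketch (illustrative only): look for a 4-byte marker in a
   readable region described by START and LEN.

     static const gdb_byte marker[] = { 0xde, 0xad, 0xbe, 0xef };
     CORE_ADDR found;

     if (target_search_memory (start, len, marker, sizeof (marker),
                               &found) == 1)
       printf_filtered ("pattern found at %s\n", hex_string (found));
*/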
2349
2350 /* Look through the currently pushed targets. If none of them will
2351 be able to restart the currently running process, issue an error
2352 message. */
2353
2354 void
2355 target_require_runnable (void)
2356 {
2357 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2358 {
2359 /* If this target knows how to create a new program, then
2360 assume we will still be able to after killing the current
2361 one. Either killing and mourning will not pop T, or else
2362 find_default_run_target will find it again. */
2363 if (t->can_create_inferior ())
2364 return;
2365
2366 /* Do not worry about targets at certain strata that cannot
2367 create inferiors. Assume they will be pushed again if
2368 necessary, and continue to the process_stratum. */
2369 if (t->stratum () > process_stratum)
2370 continue;
2371
2372 error (_("The \"%s\" target does not support \"run\". "
2373 "Try \"help target\" or \"continue\"."),
2374 t->shortname ());
2375 }
2376
2377 /* This function is only called if the target is running. In that
2378 case there should have been a process_stratum target and it
2379 should either know how to create inferiors, or not... */
2380 internal_error (__FILE__, __LINE__, _("No targets found"));
2381 }
2382
2383 /* Whether GDB is allowed to fall back to the default run target for
2384 "run", "attach", etc. when no target is connected yet. */
2385 static bool auto_connect_native_target = true;
2386
2387 static void
2388 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2389 struct cmd_list_element *c, const char *value)
2390 {
2391 fprintf_filtered (file,
2392 _("Whether GDB may automatically connect to the "
2393 "native target is %s.\n"),
2394 value);
2395 }
2396
2397 /* A pointer to the target that can respond to "run" or "attach".
2398 Native targets are always singletons and instantiated early at GDB
2399 startup. */
2400 static target_ops *the_native_target;
2401
2402 /* See target.h. */
2403
2404 void
2405 set_native_target (target_ops *target)
2406 {
2407 if (the_native_target != NULL)
2408 internal_error (__FILE__, __LINE__,
2409 _("native target already set (\"%s\")."),
2410 the_native_target->longname ());
2411
2412 the_native_target = target;
2413 }
2414
2415 /* See target.h. */
2416
2417 target_ops *
2418 get_native_target ()
2419 {
2420 return the_native_target;
2421 }
2422
2423 /* Look through the list of possible targets for a target that can
2424 execute a run or attach command without any other data. This is
2425 used to locate the default process stratum.
2426
2427 If DO_MESG is not NULL, the result is always valid (error() is
2428 called for errors); else, return NULL on error. */
2429
2430 static struct target_ops *
2431 find_default_run_target (const char *do_mesg)
2432 {
2433 if (auto_connect_native_target && the_native_target != NULL)
2434 return the_native_target;
2435
2436 if (do_mesg != NULL)
2437 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2438 return NULL;
2439 }
2440
2441 /* See target.h. */
2442
2443 struct target_ops *
2444 find_attach_target (void)
2445 {
2446 /* If a target on the current stack can attach, use it. */
2447 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2448 {
2449 if (t->can_attach ())
2450 return t;
2451 }
2452
2453 /* Otherwise, use the default run target for attaching. */
2454 return find_default_run_target ("attach");
2455 }
2456
2457 /* See target.h. */
2458
2459 struct target_ops *
2460 find_run_target (void)
2461 {
2462 /* If a target on the current stack can run, use it. */
2463 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2464 {
2465 if (t->can_create_inferior ())
2466 return t;
2467 }
2468
2469 /* Otherwise, use the default run target. */
2470 return find_default_run_target ("run");
2471 }
2472
2473 bool
2474 target_ops::info_proc (const char *args, enum info_proc_what what)
2475 {
2476 return false;
2477 }
2478
2479 /* Implement the "info proc" command. */
2480
2481 int
2482 target_info_proc (const char *args, enum info_proc_what what)
2483 {
2484 struct target_ops *t;
2485
2486 /* If we're already connected to something that can get us OS
2487 related data, use it. Otherwise, try using the native
2488 target. */
2489 t = find_target_at (process_stratum);
2490 if (t == NULL)
2491 t = find_default_run_target (NULL);
2492
2493 for (; t != NULL; t = t->beneath ())
2494 {
2495 if (t->info_proc (args, what))
2496 {
2497 if (targetdebug)
2498 fprintf_unfiltered (gdb_stdlog,
2499 "target_info_proc (\"%s\", %d)\n", args, what);
2500
2501 return 1;
2502 }
2503 }
2504
2505 return 0;
2506 }
2507
2508 static int
2509 find_default_supports_disable_randomization (struct target_ops *self)
2510 {
2511 struct target_ops *t;
2512
2513 t = find_default_run_target (NULL);
2514 if (t != NULL)
2515 return t->supports_disable_randomization ();
2516 return 0;
2517 }
2518
2519 int
2520 target_supports_disable_randomization (void)
2521 {
2522 return current_top_target ()->supports_disable_randomization ();
2523 }
2524
2525 /* See target/target.h. */
2526
2527 int
2528 target_supports_multi_process (void)
2529 {
2530 return current_top_target ()->supports_multi_process ();
2531 }
2532
2533 /* See target.h. */
2534
2535 gdb::optional<gdb::char_vector>
2536 target_get_osdata (const char *type)
2537 {
2538 struct target_ops *t;
2539
2540 /* If we're already connected to something that can get us OS
2541 related data, use it. Otherwise, try using the native
2542 target. */
2543 t = find_target_at (process_stratum);
2544 if (t == NULL)
2545 t = find_default_run_target ("get OS data");
2546
2547 if (!t)
2548 return {};
2549
2550 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2551 }
2552
2553 /* Determine the current address space of thread PTID. */
2554
2555 struct address_space *
2556 target_thread_address_space (ptid_t ptid)
2557 {
2558 struct address_space *aspace;
2559
2560 aspace = current_top_target ()->thread_address_space (ptid);
2561 gdb_assert (aspace != NULL);
2562
2563 return aspace;
2564 }
2565
2566 /* See target.h. */
2567
2568 target_ops *
2569 target_ops::beneath () const
2570 {
2571 return current_inferior ()->find_target_beneath (this);
2572 }
2573
2574 void
2575 target_ops::close ()
2576 {
2577 }
2578
2579 bool
2580 target_ops::can_attach ()
2581 {
2582 return 0;
2583 }
2584
2585 void
2586 target_ops::attach (const char *, int)
2587 {
2588 gdb_assert_not_reached ("target_ops::attach called");
2589 }
2590
2591 bool
2592 target_ops::can_create_inferior ()
2593 {
2594 return 0;
2595 }
2596
2597 void
2598 target_ops::create_inferior (const char *, const std::string &,
2599 char **, int)
2600 {
2601 gdb_assert_not_reached ("target_ops::create_inferior called");
2602 }
2603
2604 bool
2605 target_ops::can_run ()
2606 {
2607 return false;
2608 }
2609
2610 int
2611 target_can_run ()
2612 {
2613 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
2614 {
2615 if (t->can_run ())
2616 return 1;
2617 }
2618
2619 return 0;
2620 }
2621
2622 /* Target file operations. */
2623
2624 static struct target_ops *
2625 default_fileio_target (void)
2626 {
2627 struct target_ops *t;
2628
2629 /* If we're already connected to something that can perform
2630 file I/O, use it. Otherwise, try using the native target. */
2631 t = find_target_at (process_stratum);
2632 if (t != NULL)
2633 return t;
2634 return find_default_run_target ("file I/O");
2635 }
2636
2637 /* File handle for target file operations. */
2638
2639 struct fileio_fh_t
2640 {
2641 /* The target on which this file is open. NULL if the target has
2642 since been closed while the handle remains open. */
2643 target_ops *target;
2644
2645 /* The file descriptor on the target. */
2646 int target_fd;
2647
2648 /* Check whether this fileio_fh_t represents a closed file. */
2649 bool is_closed ()
2650 {
2651 return target_fd < 0;
2652 }
2653 };
2654
2655 /* Vector of currently open file handles. The value returned by
2656 target_fileio_open and passed as the FD argument to other
2657 target_fileio_* functions is an index into this vector. This
2658 vector's entries are never freed; instead, files are marked as
2659 closed, and the handle becomes available for reuse. */
2660 static std::vector<fileio_fh_t> fileio_fhandles;
2661
2662 /* Index into fileio_fhandles of the lowest handle that might be
2663 closed. This permits handle reuse without searching the whole
2664 list each time a new file is opened. */
2665 static int lowest_closed_fd;
2666
2667 /* Invalidate the target associated with open handles that were open
2668 on target TARG, since we're about to close (and maybe destroy) the
2669 target. The handles remain open from the client's perspective, but
2670 trying to do anything with them other than closing them will fail
2671 with EIO. */
2672
2673 static void
2674 fileio_handles_invalidate_target (target_ops *targ)
2675 {
2676 for (fileio_fh_t &fh : fileio_fhandles)
2677 if (fh.target == targ)
2678 fh.target = NULL;
2679 }
2680
2681 /* Acquire a target fileio file descriptor. */
2682
2683 static int
2684 acquire_fileio_fd (target_ops *target, int target_fd)
2685 {
2686 /* Search for closed handles to reuse. */
2687 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
2688 {
2689 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
2690
2691 if (fh.is_closed ())
2692 break;
2693 }
2694
2695 /* Push a new handle if no closed handles were found. */
2696 if (lowest_closed_fd == fileio_fhandles.size ())
2697 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
2698 else
2699 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
2700
2701 /* Should no longer be marked closed. */
2702 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
2703
2704 /* Return its index, and start the next lookup at
2705 the next index. */
2706 return lowest_closed_fd++;
2707 }
2708
2709 /* Release a target fileio file descriptor. */
2710
2711 static void
2712 release_fileio_fd (int fd, fileio_fh_t *fh)
2713 {
2714 fh->target_fd = -1;
2715 lowest_closed_fd = std::min (lowest_closed_fd, fd);
2716 }
2717
2718 /* Return a pointer to the fileio_fhandle_t corresponding to FD. */
2719
2720 static fileio_fh_t *
2721 fileio_fd_to_fh (int fd)
2722 {
2723 return &fileio_fhandles[fd];
2724 }
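
/* Illustrative trace of the handle table above: after three successful
   opens the vector holds slots 0, 1 and 2 and lowest_closed_fd is 3.
   Releasing slot 1 marks it closed and lowers lowest_closed_fd to 1, so
   the next acquire_fileio_fd reuses slot 1 instead of growing the
   vector, and lowest_closed_fd then moves on to 2.  */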
2725
2726
2727 /* Default implementations of file i/o methods. We don't want these
2728 to delegate automatically, because we need to know which target
2729 supported the method, in order to call it directly from within
2730 pread/pwrite, etc. */
2731
2732 int
2733 target_ops::fileio_open (struct inferior *inf, const char *filename,
2734 int flags, int mode, int warn_if_slow,
2735 int *target_errno)
2736 {
2737 *target_errno = FILEIO_ENOSYS;
2738 return -1;
2739 }
2740
2741 int
2742 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2743 ULONGEST offset, int *target_errno)
2744 {
2745 *target_errno = FILEIO_ENOSYS;
2746 return -1;
2747 }
2748
2749 int
2750 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
2751 ULONGEST offset, int *target_errno)
2752 {
2753 *target_errno = FILEIO_ENOSYS;
2754 return -1;
2755 }
2756
2757 int
2758 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
2759 {
2760 *target_errno = FILEIO_ENOSYS;
2761 return -1;
2762 }
2763
2764 int
2765 target_ops::fileio_close (int fd, int *target_errno)
2766 {
2767 *target_errno = FILEIO_ENOSYS;
2768 return -1;
2769 }
2770
2771 int
2772 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
2773 int *target_errno)
2774 {
2775 *target_errno = FILEIO_ENOSYS;
2776 return -1;
2777 }
2778
2779 gdb::optional<std::string>
2780 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
2781 int *target_errno)
2782 {
2783 *target_errno = FILEIO_ENOSYS;
2784 return {};
2785 }
2786
2787 /* Helper for target_fileio_open and
2788 target_fileio_open_warn_if_slow. */
2789
2790 static int
2791 target_fileio_open_1 (struct inferior *inf, const char *filename,
2792 int flags, int mode, int warn_if_slow,
2793 int *target_errno)
2794 {
2795 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2796 {
2797 int fd = t->fileio_open (inf, filename, flags, mode,
2798 warn_if_slow, target_errno);
2799
2800 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
2801 continue;
2802
2803 if (fd < 0)
2804 fd = -1;
2805 else
2806 fd = acquire_fileio_fd (t, fd);
2807
2808 if (targetdebug)
2809 fprintf_unfiltered (gdb_stdlog,
2810 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
2811 " = %d (%d)\n",
2812 inf == NULL ? 0 : inf->num,
2813 filename, flags, mode,
2814 warn_if_slow, fd,
2815 fd != -1 ? 0 : *target_errno);
2816 return fd;
2817 }
2818
2819 *target_errno = FILEIO_ENOSYS;
2820 return -1;
2821 }
2822
2823 /* See target.h. */
2824
2825 int
2826 target_fileio_open (struct inferior *inf, const char *filename,
2827 int flags, int mode, int *target_errno)
2828 {
2829 return target_fileio_open_1 (inf, filename, flags, mode, 0,
2830 target_errno);
2831 }
2832
2833 /* See target.h. */
2834
2835 int
2836 target_fileio_open_warn_if_slow (struct inferior *inf,
2837 const char *filename,
2838 int flags, int mode, int *target_errno)
2839 {
2840 return target_fileio_open_1 (inf, filename, flags, mode, 1,
2841 target_errno);
2842 }
2843
2844 /* See target.h. */
2845
2846 int
2847 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2848 ULONGEST offset, int *target_errno)
2849 {
2850 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2851 int ret = -1;
2852
2853 if (fh->is_closed ())
2854 *target_errno = EBADF;
2855 else if (fh->target == NULL)
2856 *target_errno = EIO;
2857 else
2858 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
2859 len, offset, target_errno);
2860
2861 if (targetdebug)
2862 fprintf_unfiltered (gdb_stdlog,
2863 "target_fileio_pwrite (%d,...,%d,%s) "
2864 "= %d (%d)\n",
2865 fd, len, pulongest (offset),
2866 ret, ret != -1 ? 0 : *target_errno);
2867 return ret;
2868 }
2869
2870 /* See target.h. */
2871
2872 int
2873 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2874 ULONGEST offset, int *target_errno)
2875 {
2876 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2877 int ret = -1;
2878
2879 if (fh->is_closed ())
2880 *target_errno = EBADF;
2881 else if (fh->target == NULL)
2882 *target_errno = EIO;
2883 else
2884 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
2885 len, offset, target_errno);
2886
2887 if (targetdebug)
2888 fprintf_unfiltered (gdb_stdlog,
2889 "target_fileio_pread (%d,...,%d,%s) "
2890 "= %d (%d)\n",
2891 fd, len, pulongest (offset),
2892 ret, ret != -1 ? 0 : *target_errno);
2893 return ret;
2894 }
2895
2896 /* See target.h. */
2897
2898 int
2899 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
2900 {
2901 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2902 int ret = -1;
2903
2904 if (fh->is_closed ())
2905 *target_errno = EBADF;
2906 else if (fh->target == NULL)
2907 *target_errno = EIO;
2908 else
2909 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
2910
2911 if (targetdebug)
2912 fprintf_unfiltered (gdb_stdlog,
2913 "target_fileio_fstat (%d) = %d (%d)\n",
2914 fd, ret, ret != -1 ? 0 : *target_errno);
2915 return ret;
2916 }
2917
2918 /* See target.h. */
2919
2920 int
2921 target_fileio_close (int fd, int *target_errno)
2922 {
2923 fileio_fh_t *fh = fileio_fd_to_fh (fd);
2924 int ret = -1;
2925
2926 if (fh->is_closed ())
2927 *target_errno = EBADF;
2928 else
2929 {
2930 if (fh->target != NULL)
2931 ret = fh->target->fileio_close (fh->target_fd,
2932 target_errno);
2933 else
2934 ret = 0;
2935 release_fileio_fd (fd, fh);
2936 }
2937
2938 if (targetdebug)
2939 fprintf_unfiltered (gdb_stdlog,
2940 "target_fileio_close (%d) = %d (%d)\n",
2941 fd, ret, ret != -1 ? 0 : *target_errno);
2942 return ret;
2943 }
2944
2945 /* See target.h. */
2946
2947 int
2948 target_fileio_unlink (struct inferior *inf, const char *filename,
2949 int *target_errno)
2950 {
2951 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2952 {
2953 int ret = t->fileio_unlink (inf, filename, target_errno);
2954
2955 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
2956 continue;
2957
2958 if (targetdebug)
2959 fprintf_unfiltered (gdb_stdlog,
2960 "target_fileio_unlink (%d,%s)"
2961 " = %d (%d)\n",
2962 inf == NULL ? 0 : inf->num, filename,
2963 ret, ret != -1 ? 0 : *target_errno);
2964 return ret;
2965 }
2966
2967 *target_errno = FILEIO_ENOSYS;
2968 return -1;
2969 }
2970
2971 /* See target.h. */
2972
2973 gdb::optional<std::string>
2974 target_fileio_readlink (struct inferior *inf, const char *filename,
2975 int *target_errno)
2976 {
2977 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
2978 {
2979 gdb::optional<std::string> ret
2980 = t->fileio_readlink (inf, filename, target_errno);
2981
2982 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
2983 continue;
2984
2985 if (targetdebug)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "target_fileio_readlink (%d,%s)"
2988 " = %s (%d)\n",
2989 inf == NULL ? 0 : inf->num,
2990 filename, ret ? ret->c_str () : "(nil)",
2991 ret ? 0 : *target_errno);
2992 return ret;
2993 }
2994
2995 *target_errno = FILEIO_ENOSYS;
2996 return {};
2997 }
2998
2999 /* Like scoped_fd, but specific to target fileio. */
3000
3001 class scoped_target_fd
3002 {
3003 public:
3004 explicit scoped_target_fd (int fd) noexcept
3005 : m_fd (fd)
3006 {
3007 }
3008
3009 ~scoped_target_fd ()
3010 {
3011 if (m_fd >= 0)
3012 {
3013 int target_errno;
3014
3015 target_fileio_close (m_fd, &target_errno);
3016 }
3017 }
3018
3019 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3020
3021 int get () const noexcept
3022 {
3023 return m_fd;
3024 }
3025
3026 private:
3027 int m_fd;
3028 };
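
/* Usage sketch (illustrative only; the filename is just an example):
   the destructor closes the descriptor on every exit path, including
   paths that throw while reading.

     int target_errno;
     scoped_target_fd fd (target_fileio_open (NULL, "/proc/version",
                                              FILEIO_O_RDONLY, 0,
                                              &target_errno));
     if (fd.get () != -1)
       {
         gdb_byte buf[128];
         target_fileio_pread (fd.get (), buf, sizeof (buf), 0,
                              &target_errno);
       }
*/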
3029
3030 /* Read target file FILENAME, in the filesystem as seen by INF. If
3031 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3032 remote targets, the remote stub). Store the result in *BUF_P and
3033 return the size of the transferred data. PADDING additional bytes
3034 are available in *BUF_P. This is a helper function for
3035 target_fileio_read_alloc; see the declaration of that function for
3036 more information. */
3037
3038 static LONGEST
3039 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3040 gdb_byte **buf_p, int padding)
3041 {
3042 size_t buf_alloc, buf_pos;
3043 gdb_byte *buf;
3044 LONGEST n;
3045 int target_errno;
3046
3047 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3048 0700, &target_errno));
3049 if (fd.get () == -1)
3050 return -1;
3051
3052 /* Start by reading up to 4K at a time. The target will throttle
3053 this number down if necessary. */
3054 buf_alloc = 4096;
3055 buf = (gdb_byte *) xmalloc (buf_alloc);
3056 buf_pos = 0;
3057 while (1)
3058 {
3059 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3060 buf_alloc - buf_pos - padding, buf_pos,
3061 &target_errno);
3062 if (n < 0)
3063 {
3064 /* An error occurred. */
3065 xfree (buf);
3066 return -1;
3067 }
3068 else if (n == 0)
3069 {
3070 /* Read all there was. */
3071 if (buf_pos == 0)
3072 xfree (buf);
3073 else
3074 *buf_p = buf;
3075 return buf_pos;
3076 }
3077
3078 buf_pos += n;
3079
3080 /* If the buffer is filling up, expand it. */
3081 if (buf_alloc < buf_pos * 2)
3082 {
3083 buf_alloc *= 2;
3084 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3085 }
3086
3087 QUIT;
3088 }
3089 }
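
/* Illustrative note on the growth policy above: the buffer starts at
   4096 bytes and doubles whenever it is more than half full, so reading
   a large file costs a logarithmic number of reallocations rather than
   one per pread.  */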
3090
3091 /* See target.h. */
3092
3093 LONGEST
3094 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3095 gdb_byte **buf_p)
3096 {
3097 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3098 }
3099
3100 /* See target.h. */
3101
3102 gdb::unique_xmalloc_ptr<char>
3103 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3104 {
3105 gdb_byte *buffer;
3106 char *bufstr;
3107 LONGEST i, transferred;
3108
3109 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3110 bufstr = (char *) buffer;
3111
3112 if (transferred < 0)
3113 return gdb::unique_xmalloc_ptr<char> (nullptr);
3114
3115 if (transferred == 0)
3116 return make_unique_xstrdup ("");
3117
3118 bufstr[transferred] = 0;
3119
3120 /* Check for embedded NUL bytes; but allow trailing NULs. */
3121 for (i = strlen (bufstr); i < transferred; i++)
3122 if (bufstr[i] != 0)
3123 {
3124 warning (_("target file %s "
3125 "contained unexpected null characters"),
3126 filename);
3127 break;
3128 }
3129
3130 return gdb::unique_xmalloc_ptr<char> (bufstr);
3131 }
3132
3133
3134 static int
3135 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3136 CORE_ADDR addr, int len)
3137 {
3138 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3139 }
3140
3141 static int
3142 default_watchpoint_addr_within_range (struct target_ops *target,
3143 CORE_ADDR addr,
3144 CORE_ADDR start, int length)
3145 {
3146 return addr >= start && addr < start + length;
3147 }
3148
3149 /* See target.h. */
3150
3151 target_ops *
3152 target_stack::find_beneath (const target_ops *t) const
3153 {
3154 /* Look for a non-empty slot at stratum levels beneath T's. */
3155 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3156 if (m_stack[stratum] != NULL)
3157 return m_stack[stratum];
3158
3159 return NULL;
3160 }
3161
3162 /* See target.h. */
3163
3164 struct target_ops *
3165 find_target_at (enum strata stratum)
3166 {
3167 return current_inferior ()->target_at (stratum);
3168 }
3169
3170 \f
3171
3172 /* See target.h */
3173
3174 void
3175 target_announce_detach (int from_tty)
3176 {
3177 pid_t pid;
3178 const char *exec_file;
3179
3180 if (!from_tty)
3181 return;
3182
3183 exec_file = get_exec_file (0);
3184 if (exec_file == NULL)
3185 exec_file = "";
3186
3187 pid = inferior_ptid.pid ();
3188 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3189 target_pid_to_str (ptid_t (pid)).c_str ());
3190 }
3191
3192 /* The inferior process has died. Long live the inferior! */
3193
3194 void
3195 generic_mourn_inferior (void)
3196 {
3197 inferior *inf = current_inferior ();
3198
3199 inferior_ptid = null_ptid;
3200
3201 /* Mark breakpoints uninserted in case something tries to delete a
3202 breakpoint while we delete the inferior's threads (which would
3203 fail, since the inferior is long gone). */
3204 mark_breakpoints_out ();
3205
3206 if (inf->pid != 0)
3207 exit_inferior (inf);
3208
3209 /* Note this wipes step-resume breakpoints, so needs to be done
3210 after exit_inferior, which ends up referencing the step-resume
3211 breakpoints through clear_thread_inferior_resources. */
3212 breakpoint_init_inferior (inf_exited);
3213
3214 registers_changed ();
3215
3216 reopen_exec_file ();
3217 reinit_frame_cache ();
3218
3219 if (deprecated_detach_hook)
3220 deprecated_detach_hook ();
3221 }
3222 \f
3223 /* Convert a normal process ID to a string. Returns the string in a
3224 static buffer. */
3225
3226 std::string
3227 normal_pid_to_str (ptid_t ptid)
3228 {
3229 return string_printf ("process %d", ptid.pid ());
3230 }
3231
3232 static std::string
3233 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3234 {
3235 return normal_pid_to_str (ptid);
3236 }
3237
3238 /* Error-catcher for target_find_memory_regions. */
3239 static int
3240 dummy_find_memory_regions (struct target_ops *self,
3241 find_memory_region_ftype ignore1, void *ignore2)
3242 {
3243 error (_("Command not implemented for this target."));
3244 return 0;
3245 }
3246
3247 /* Error-catcher for target_make_corefile_notes. */
3248 static char *
3249 dummy_make_corefile_notes (struct target_ops *self,
3250 bfd *ignore1, int *ignore2)
3251 {
3252 error (_("Command not implemented for this target."));
3253 return NULL;
3254 }
3255
3256 #include "target-delegates.c"
3257
3258 /* The initial current target, so that there is always a semi-valid
3259 current target. */
3260
3261 static dummy_target the_dummy_target;
3262
3263 /* See target.h. */
3264
3265 target_ops *
3266 get_dummy_target ()
3267 {
3268 return &the_dummy_target;
3269 }
3270
3271 static const target_info dummy_target_info = {
3272 "None",
3273 N_("None"),
3274 ""
3275 };
3276
3277 strata
3278 dummy_target::stratum () const
3279 {
3280 return dummy_stratum;
3281 }
3282
3283 strata
3284 debug_target::stratum () const
3285 {
3286 return debug_stratum;
3287 }
3288
3289 const target_info &
3290 dummy_target::info () const
3291 {
3292 return dummy_target_info;
3293 }
3294
3295 const target_info &
3296 debug_target::info () const
3297 {
3298 return beneath ()->info ();
3299 }
3300
3301 \f
3302
3303 void
3304 target_close (struct target_ops *targ)
3305 {
3306 gdb_assert (!target_is_pushed (targ));
3307
3308 fileio_handles_invalidate_target (targ);
3309
3310 targ->close ();
3311
3312 if (targetdebug)
3313 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3314 }
3315
3316 int
3317 target_thread_alive (ptid_t ptid)
3318 {
3319 return current_top_target ()->thread_alive (ptid);
3320 }
3321
3322 void
3323 target_update_thread_list (void)
3324 {
3325 current_top_target ()->update_thread_list ();
3326 }
3327
3328 void
3329 target_stop (ptid_t ptid)
3330 {
3331 if (!may_stop)
3332 {
3333 warning (_("May not interrupt or stop the target, ignoring attempt"));
3334 return;
3335 }
3336
3337 current_top_target ()->stop (ptid);
3338 }
3339
3340 void
3341 target_interrupt ()
3342 {
3343 if (!may_stop)
3344 {
3345 warning (_("May not interrupt or stop the target, ignoring attempt"));
3346 return;
3347 }
3348
3349 current_top_target ()->interrupt ();
3350 }
3351
3352 /* See target.h. */
3353
3354 void
3355 target_pass_ctrlc (void)
3356 {
3357 /* Pass the Ctrl-C to the first target that has a thread
3358 running. */
3359 for (inferior *inf : all_inferiors ())
3360 {
3361 target_ops *proc_target = inf->process_target ();
3362 if (proc_target == NULL)
3363 continue;
3364
3365 for (thread_info *thr : inf->threads ())
3366 {
3367 /* A thread can be THREAD_STOPPED and executing, while
3368 running an infcall. */
3369 if (thr->state == THREAD_RUNNING || thr->executing)
3370 {
3371 /* We can get here quite deep in target layers. Avoid
3372 switching thread context or anything that would
3373 communicate with the target (e.g., to fetch
3374 registers), or flushing e.g., the frame cache. We
3375 just switch inferior in order to be able to call
3376 through the target_stack. */
3377 scoped_restore_current_inferior restore_inferior;
3378 set_current_inferior (inf);
3379 current_top_target ()->pass_ctrlc ();
3380 return;
3381 }
3382 }
3383 }
3384 }
3385
3386 /* See target.h. */
3387
3388 void
3389 default_target_pass_ctrlc (struct target_ops *ops)
3390 {
3391 target_interrupt ();
3392 }
3393
3394 /* See target/target.h. */
3395
3396 void
3397 target_stop_and_wait (ptid_t ptid)
3398 {
3399 struct target_waitstatus status;
3400 bool was_non_stop = non_stop;
3401
3402 non_stop = true;
3403 target_stop (ptid);
3404
3405 memset (&status, 0, sizeof (status));
3406 target_wait (ptid, &status, 0);
3407
3408 non_stop = was_non_stop;
3409 }
3410
3411 /* See target/target.h. */
3412
3413 void
3414 target_continue_no_signal (ptid_t ptid)
3415 {
3416 target_resume (ptid, 0, GDB_SIGNAL_0);
3417 }
3418
3419 /* See target/target.h. */
3420
3421 void
3422 target_continue (ptid_t ptid, enum gdb_signal signal)
3423 {
3424 target_resume (ptid, 0, signal);
3425 }
3426
3427 /* Concatenate ELEM to LIST, a comma-separated list. */
3428
3429 static void
3430 str_comma_list_concat_elem (std::string *list, const char *elem)
3431 {
3432 if (!list->empty ())
3433 list->append (", ");
3434
3435 list->append (elem);
3436 }
3437
3438 /* Helper for target_options_to_string. If OPT is present in
3439 TARGET_OPTIONS, append the OPT_STR (string version of OPT) to RET.
3440 OPT is removed from TARGET_OPTIONS. */
3441
3442 static void
3443 do_option (int *target_options, std::string *ret,
3444 int opt, const char *opt_str)
3445 {
3446 if ((*target_options & opt) != 0)
3447 {
3448 str_comma_list_concat_elem (ret, opt_str);
3449 *target_options &= ~opt;
3450 }
3451 }
3452
3453 /* See target.h. */
3454
3455 std::string
3456 target_options_to_string (int target_options)
3457 {
3458 std::string ret;
3459
3460 #define DO_TARG_OPTION(OPT) \
3461 do_option (&target_options, &ret, OPT, #OPT)
3462
3463 DO_TARG_OPTION (TARGET_WNOHANG);
3464
3465 if (target_options != 0)
3466 str_comma_list_concat_elem (&ret, "unknown???");
3467
3468 return ret;
3469 }
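
/* For example (illustrative), target_options_to_string (TARGET_WNOHANG)
   yields "TARGET_WNOHANG", while a value with bits not handled above
   additionally gets a trailing "unknown???" element.  */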
3470
3471 void
3472 target_fetch_registers (struct regcache *regcache, int regno)
3473 {
3474 current_top_target ()->fetch_registers (regcache, regno);
3475 if (targetdebug)
3476 regcache->debug_print_register ("target_fetch_registers", regno);
3477 }
3478
3479 void
3480 target_store_registers (struct regcache *regcache, int regno)
3481 {
3482 if (!may_write_registers)
3483 error (_("Writing to registers is not allowed (regno %d)"), regno);
3484
3485 current_top_target ()->store_registers (regcache, regno);
3486 if (targetdebug)
3487 {
3488 regcache->debug_print_register ("target_store_registers", regno);
3489 }
3490 }
3491
3492 int
3493 target_core_of_thread (ptid_t ptid)
3494 {
3495 return current_top_target ()->core_of_thread (ptid);
3496 }
3497
3498 int
3499 simple_verify_memory (struct target_ops *ops,
3500 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3501 {
3502 LONGEST total_xfered = 0;
3503
3504 while (total_xfered < size)
3505 {
3506 ULONGEST xfered_len;
3507 enum target_xfer_status status;
3508 gdb_byte buf[1024];
3509 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3510
3511 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3512 buf, NULL, lma + total_xfered, howmuch,
3513 &xfered_len);
3514 if (status == TARGET_XFER_OK
3515 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3516 {
3517 total_xfered += xfered_len;
3518 QUIT;
3519 }
3520 else
3521 return 0;
3522 }
3523 return 1;
3524 }
3525
3526 /* Default implementation of memory verification. */
3527
3528 static int
3529 default_verify_memory (struct target_ops *self,
3530 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3531 {
3532 /* Start over from the top of the target stack. */
3533 return simple_verify_memory (current_top_target (),
3534 data, memaddr, size);
3535 }
3536
3537 int
3538 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3539 {
3540 return current_top_target ()->verify_memory (data, memaddr, size);
3541 }
3542
3543 /* The documentation for this function is in its prototype declaration in
3544 target.h. */
3545
3546 int
3547 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3548 enum target_hw_bp_type rw)
3549 {
3550 return current_top_target ()->insert_mask_watchpoint (addr, mask, rw);
3551 }
3552
3553 /* The documentation for this function is in its prototype declaration in
3554 target.h. */
3555
3556 int
3557 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3558 enum target_hw_bp_type rw)
3559 {
3560 return current_top_target ()->remove_mask_watchpoint (addr, mask, rw);
3561 }
3562
3563 /* The documentation for this function is in its prototype declaration
3564 in target.h. */
3565
3566 int
3567 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3568 {
3569 return current_top_target ()->masked_watch_num_registers (addr, mask);
3570 }
3571
3572 /* The documentation for this function is in its prototype declaration
3573 in target.h. */
3574
3575 int
3576 target_ranged_break_num_registers (void)
3577 {
3578 return current_top_target ()->ranged_break_num_registers ();
3579 }
3580
3581 /* See target.h. */
3582
3583 struct btrace_target_info *
3584 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
3585 {
3586 return current_top_target ()->enable_btrace (ptid, conf);
3587 }
3588
3589 /* See target.h. */
3590
3591 void
3592 target_disable_btrace (struct btrace_target_info *btinfo)
3593 {
3594 current_top_target ()->disable_btrace (btinfo);
3595 }
3596
3597 /* See target.h. */
3598
3599 void
3600 target_teardown_btrace (struct btrace_target_info *btinfo)
3601 {
3602 current_top_target ()->teardown_btrace (btinfo);
3603 }
3604
3605 /* See target.h. */
3606
3607 enum btrace_error
3608 target_read_btrace (struct btrace_data *btrace,
3609 struct btrace_target_info *btinfo,
3610 enum btrace_read_type type)
3611 {
3612 return current_top_target ()->read_btrace (btrace, btinfo, type);
3613 }
3614
3615 /* See target.h. */
3616
3617 const struct btrace_config *
3618 target_btrace_conf (const struct btrace_target_info *btinfo)
3619 {
3620 return current_top_target ()->btrace_conf (btinfo);
3621 }
3622
3623 /* See target.h. */
3624
3625 void
3626 target_stop_recording (void)
3627 {
3628 current_top_target ()->stop_recording ();
3629 }
3630
3631 /* See target.h. */
3632
3633 void
3634 target_save_record (const char *filename)
3635 {
3636 current_top_target ()->save_record (filename);
3637 }
3638
3639 /* See target.h. */
3640
3641 int
3642 target_supports_delete_record ()
3643 {
3644 return current_top_target ()->supports_delete_record ();
3645 }
3646
3647 /* See target.h. */
3648
3649 void
3650 target_delete_record (void)
3651 {
3652 current_top_target ()->delete_record ();
3653 }
3654
3655 /* See target.h. */
3656
3657 enum record_method
3658 target_record_method (ptid_t ptid)
3659 {
3660 return current_top_target ()->record_method (ptid);
3661 }
3662
3663 /* See target.h. */
3664
3665 int
3666 target_record_is_replaying (ptid_t ptid)
3667 {
3668 return current_top_target ()->record_is_replaying (ptid);
3669 }
3670
3671 /* See target.h. */
3672
3673 int
3674 target_record_will_replay (ptid_t ptid, int dir)
3675 {
3676 return current_top_target ()->record_will_replay (ptid, dir);
3677 }
3678
3679 /* See target.h. */
3680
3681 void
3682 target_record_stop_replaying (void)
3683 {
3684 current_top_target ()->record_stop_replaying ();
3685 }
3686
3687 /* See target.h. */
3688
3689 void
3690 target_goto_record_begin (void)
3691 {
3692 current_top_target ()->goto_record_begin ();
3693 }
3694
3695 /* See target.h. */
3696
3697 void
3698 target_goto_record_end (void)
3699 {
3700 current_top_target ()->goto_record_end ();
3701 }
3702
3703 /* See target.h. */
3704
3705 void
3706 target_goto_record (ULONGEST insn)
3707 {
3708 current_top_target ()->goto_record (insn);
3709 }
3710
3711 /* See target.h. */
3712
3713 void
3714 target_insn_history (int size, gdb_disassembly_flags flags)
3715 {
3716 current_top_target ()->insn_history (size, flags);
3717 }
3718
3719 /* See target.h. */
3720
3721 void
3722 target_insn_history_from (ULONGEST from, int size,
3723 gdb_disassembly_flags flags)
3724 {
3725 current_top_target ()->insn_history_from (from, size, flags);
3726 }
3727
3728 /* See target.h. */
3729
3730 void
3731 target_insn_history_range (ULONGEST begin, ULONGEST end,
3732 gdb_disassembly_flags flags)
3733 {
3734 current_top_target ()->insn_history_range (begin, end, flags);
3735 }
3736
3737 /* See target.h. */
3738
3739 void
3740 target_call_history (int size, record_print_flags flags)
3741 {
3742 current_top_target ()->call_history (size, flags);
3743 }
3744
3745 /* See target.h. */
3746
3747 void
3748 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
3749 {
3750 current_top_target ()->call_history_from (begin, size, flags);
3751 }
3752
3753 /* See target.h. */
3754
3755 void
3756 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
3757 {
3758 current_top_target ()->call_history_range (begin, end, flags);
3759 }
3760
3761 /* See target.h. */
3762
3763 const struct frame_unwind *
3764 target_get_unwinder (void)
3765 {
3766 return current_top_target ()->get_unwinder ();
3767 }
3768
3769 /* See target.h. */
3770
3771 const struct frame_unwind *
3772 target_get_tailcall_unwinder (void)
3773 {
3774 return current_top_target ()->get_tailcall_unwinder ();
3775 }
3776
3777 /* See target.h. */
3778
3779 void
3780 target_prepare_to_generate_core (void)
3781 {
3782 current_top_target ()->prepare_to_generate_core ();
3783 }
3784
3785 /* See target.h. */
3786
3787 void
3788 target_done_generating_core (void)
3789 {
3790 current_top_target ()->done_generating_core ();
3791 }
3792
3793 \f
3794
3795 static char targ_desc[] =
3796 "Names of targets and files being debugged.\nShows the entire \
3797 stack of targets currently in use (including the exec-file,\n\
3798 core-file, and process, if any), as well as the symbol file name.";
3799
3800 static void
3801 default_rcmd (struct target_ops *self, const char *command,
3802 struct ui_file *output)
3803 {
3804 error (_("\"monitor\" command not supported by this target."));
3805 }
3806
3807 static void
3808 do_monitor_command (const char *cmd, int from_tty)
3809 {
3810 target_rcmd (cmd, gdb_stdtarg);
3811 }
3812
3813 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
3814 ignored. */
3815
3816 void
3817 flash_erase_command (const char *cmd, int from_tty)
3818 {
3819 /* Used to communicate termination of flash operations to the target. */
3820 bool found_flash_region = false;
3821 struct gdbarch *gdbarch = target_gdbarch ();
3822
3823 std::vector<mem_region> mem_regions = target_memory_map ();
3824
3825 /* Iterate over all memory regions. */
3826 for (const mem_region &m : mem_regions)
3827 {
3828 /* Is this a flash memory region? */
3829 if (m.attrib.mode == MEM_FLASH)
3830 {
3831 found_flash_region = true;
3832 target_flash_erase (m.lo, m.hi - m.lo);
3833
3834 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
3835
3836 current_uiout->message (_("Erasing flash memory region at address "));
3837 current_uiout->field_core_addr ("address", gdbarch, m.lo);
3838 current_uiout->message (", size = ");
3839 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
3840 current_uiout->message ("\n");
3841 }
3842 }
3843
3844 /* Did we do any flash operations? If so, we need to finalize them. */
3845 if (found_flash_region)
3846 target_flash_done ();
3847 else
3848 current_uiout->message (_("No flash memory regions found.\n"));
3849 }
3850
3851 /* Print the name of each layer of our target stack. */
3852
3853 static void
3854 maintenance_print_target_stack (const char *cmd, int from_tty)
3855 {
3856 printf_filtered (_("The current target stack is:\n"));
3857
3858 for (target_ops *t = current_top_target (); t != NULL; t = t->beneath ())
3859 {
3860 if (t->stratum () == debug_stratum)
3861 continue;
3862 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
3863 }
3864 }
3865
3866 /* See target.h. */
3867
3868 void
3869 target_async (int enable)
3870 {
3871 infrun_async (enable);
3872 current_top_target ()->async (enable);
3873 }
3874
3875 /* See target.h. */
3876
3877 void
3878 target_thread_events (int enable)
3879 {
3880 current_top_target ()->thread_events (enable);
3881 }
3882
3883 /* Controls if targets can report that they can/are async. This is
3884 just for maintainers to use when debugging gdb. */
3885 bool target_async_permitted = true;
3886
3887 /* The set command writes to this variable. If the inferior is
3888 executing, target_async_permitted is *not* updated. */
3889 static bool target_async_permitted_1 = true;
3890
3891 static void
3892 maint_set_target_async_command (const char *args, int from_tty,
3893 struct cmd_list_element *c)
3894 {
3895 if (have_live_inferiors ())
3896 {
3897 target_async_permitted_1 = target_async_permitted;
3898 error (_("Cannot change this setting while the inferior is running."));
3899 }
3900
3901 target_async_permitted = target_async_permitted_1;
3902 }
3903
3904 static void
3905 maint_show_target_async_command (struct ui_file *file, int from_tty,
3906 struct cmd_list_element *c,
3907 const char *value)
3908 {
3909 fprintf_filtered (file,
3910 _("Controlling the inferior in "
3911 "asynchronous mode is %s.\n"), value);
3912 }
3913
3914 /* Return true if the target operates in non-stop mode even with "set
3915 non-stop off". */
3916
3917 static int
3918 target_always_non_stop_p (void)
3919 {
3920 return current_top_target ()->always_non_stop_p ();
3921 }
3922
3923 /* See target.h. */
3924
3925 int
3926 target_is_non_stop_p (void)
3927 {
3928 return (non_stop
3929 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
3930 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
3931 && target_always_non_stop_p ()));
3932 }
3933
3934 /* Controls if targets can report that they always run in non-stop
3935 mode. This is just for maintainers to use when debugging gdb. */
3936 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
3937
3938 /* The set command writes to this variable. If the inferior is
3939 executing, target_non_stop_enabled is *not* updated. */
3940 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
3941
3942 /* Implementation of "maint set target-non-stop". */
3943
3944 static void
3945 maint_set_target_non_stop_command (const char *args, int from_tty,
3946 struct cmd_list_element *c)
3947 {
3948 if (have_live_inferiors ())
3949 {
3950 target_non_stop_enabled_1 = target_non_stop_enabled;
3951 error (_("Cannot change this setting while the inferior is running."));
3952 }
3953
3954 target_non_stop_enabled = target_non_stop_enabled_1;
3955 }
3956
3957 /* Implementation of "maint show target-non-stop". */
3958
3959 static void
3960 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
3961 struct cmd_list_element *c,
3962 const char *value)
3963 {
3964 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
3965 fprintf_filtered (file,
3966 _("Whether the target is always in non-stop mode "
3967 "is %s (currently %s).\n"), value,
3968 target_always_non_stop_p () ? "on" : "off");
3969 else
3970 fprintf_filtered (file,
3971 _("Whether the target is always in non-stop mode "
3972 "is %s.\n"), value);
3973 }
3974
3975 /* Temporary copies of permission settings. */
3976
3977 static bool may_write_registers_1 = true;
3978 static bool may_write_memory_1 = true;
3979 static bool may_insert_breakpoints_1 = true;
3980 static bool may_insert_tracepoints_1 = true;
3981 static bool may_insert_fast_tracepoints_1 = true;
3982 static bool may_stop_1 = true;
3983
3984 /* Make the user-set values match the real values again. */
3985
3986 void
3987 update_target_permissions (void)
3988 {
3989 may_write_registers_1 = may_write_registers;
3990 may_write_memory_1 = may_write_memory;
3991 may_insert_breakpoints_1 = may_insert_breakpoints;
3992 may_insert_tracepoints_1 = may_insert_tracepoints;
3993 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3994 may_stop_1 = may_stop;
3995 }
3996
3997 /* The one function handles (most of) the permission flags in the same
3998 way. */
3999
4000 static void
4001 set_target_permissions (const char *args, int from_tty,
4002 struct cmd_list_element *c)
4003 {
4004 if (target_has_execution)
4005 {
4006 update_target_permissions ();
4007 error (_("Cannot change this setting while the inferior is running."));
4008 }
4009
4010 /* Make the real values match the user-changed values. */
4011 may_write_registers = may_write_registers_1;
4012 may_insert_breakpoints = may_insert_breakpoints_1;
4013 may_insert_tracepoints = may_insert_tracepoints_1;
4014 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4015 may_stop = may_stop_1;
4016 update_observer_mode ();
4017 }
4018
4019 /* Set memory write permission independently of observer mode. */
4020
4021 static void
4022 set_write_memory_permission (const char *args, int from_tty,
4023 struct cmd_list_element *c)
4024 {
4025 /* Make the real values match the user-changed values. */
4026 may_write_memory = may_write_memory_1;
4027 update_observer_mode ();
4028 }

void _initialize_target ();

void
_initialize_target ()
{
  the_debug_target = new debug_target ();

  add_info ("target", info_target_command, targ_desc);
  add_info ("files", info_target_command, targ_desc);

  add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose."),
                             set_targetdebug,
                             show_targetdebug,
                             &setdebuglist, &showdebuglist);
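
  /* Editor's note (not part of the original source): the registration
     above lives under the "debug" set/show prefixes, so the resulting
     commands are, e.g.:

       (gdb) set debug target 1
       (gdb) show debug target

     Higher values produce more verbose logging, as the help text says.  */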

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
                           &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
                           NULL,
                           show_trust_readonly,
                           &setlist, &showlist);
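
  /* Editor's note (not part of the original source): example usage of
     the setting registered above:

       (gdb) set trust-readonly-sections on
       (gdb) show trust-readonly-sections

     This mainly helps slow remote targets, as the help text explains.  */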

  add_com ("monitor", class_obscure, do_monitor_command,
           _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
           _("Print the name of each layer of the internal target stack."),
           &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
                           &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
                           maint_set_target_async_command,
                           maint_show_target_async_command,
                           &maintenance_set_cmdlist,
                           &maintenance_show_cmdlist);

  add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
                                &target_non_stop_enabled_1, _("\
Set whether gdb always controls the inferior in non-stop mode."), _("\
Show whether gdb always controls the inferior in non-stop mode."), _("\
Tells gdb whether to control the inferior in non-stop mode."),
                                maint_set_target_non_stop_command,
                                maint_show_target_non_stop_command,
                                &maintenance_set_cmdlist,
                                &maintenance_show_cmdlist);

  add_setshow_boolean_cmd ("may-write-registers", class_support,
                           &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
                           &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
                           set_write_memory_permission, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
                           &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
                           &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
                           &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
                           &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
                           set_target_permissions, NULL,
                           &setlist, &showlist);

  add_com ("flash-erase", no_class, flash_erase_command,
           _("Erase all flash memory regions."));

  add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
                           &auto_connect_native_target, _("\
Set whether GDB may automatically connect to the native target."), _("\
Show whether GDB may automatically connect to the native target."), _("\
When on, and GDB is not connected to a target yet, GDB\n\
attempts \"run\" and other commands with the native target."),
                           NULL, show_auto_connect_native_target,
                           &setlist, &showlist);
}