1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2021 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "target.h"
24 #include "target-dcache.h"
25 #include "gdbcmd.h"
26 #include "symtab.h"
27 #include "inferior.h"
28 #include "infrun.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdbcore.h"
36 #include "target-descriptions.h"
37 #include "gdbthread.h"
38 #include "solib.h"
39 #include "exec.h"
40 #include "inline-frame.h"
41 #include "tracepoint.h"
42 #include "gdb/fileio.h"
43 #include "gdbsupport/agent.h"
44 #include "auxv.h"
45 #include "target-debug.h"
46 #include "top.h"
47 #include "event-top.h"
48 #include <algorithm>
49 #include "gdbsupport/byte-vector.h"
50 #include "gdbsupport/search.h"
51 #include "terminal.h"
52 #include <unordered_map>
53 #include "target-connection.h"
54 #include "valprint.h"
55 #include "cli/cli-decode.h"
56
57 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
58
59 static void default_terminal_info (struct target_ops *, const char *, int);
60
61 static int default_watchpoint_addr_within_range (struct target_ops *,
62 CORE_ADDR, CORE_ADDR, int);
63
64 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
65 CORE_ADDR, int);
66
67 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
68
69 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
70 long lwp, long tid);
71
72 static void default_mourn_inferior (struct target_ops *self);
73
74 static int default_search_memory (struct target_ops *ops,
75 CORE_ADDR start_addr,
76 ULONGEST search_space_len,
77 const gdb_byte *pattern,
78 ULONGEST pattern_len,
79 CORE_ADDR *found_addrp);
80
81 static int default_verify_memory (struct target_ops *self,
82 const gdb_byte *data,
83 CORE_ADDR memaddr, ULONGEST size);
84
85 static void tcomplain (void) ATTRIBUTE_NORETURN;
86
87 static struct target_ops *find_default_run_target (const char *);
88
89 static int dummy_find_memory_regions (struct target_ops *self,
90 find_memory_region_ftype ignore1,
91 void *ignore2);
92
93 static gdb::unique_xmalloc_ptr<char> dummy_make_corefile_notes
94 (struct target_ops *self, bfd *ignore1, int *ignore2);
95
96 static std::string default_pid_to_str (struct target_ops *ops, ptid_t ptid);
97
98 static enum exec_direction_kind default_execution_direction
99 (struct target_ops *self);
100
101 /* Mapping between target_info objects (which have address identity)
102 and corresponding open/factory function/callback. Each add_target
103 call adds one entry to this map, and registers a "target
104 TARGET_NAME" command that when invoked calls the factory registered
105 here. The target_info object is associated with the command via
106 the command's context. */
107 static std::unordered_map<const target_info *, target_open_ftype *>
108 target_factories;
109
110 /* The singleton debug target. */
111
112 static struct target_ops *the_debug_target;
113
114 /* Command list for target. */
115
116 static struct cmd_list_element *targetlist = NULL;
117
118 /* True if we should trust readonly sections from the
119 executable when reading memory. */
120
121 static bool trust_readonly = false;
122
123 /* Nonzero if we should show true memory content including
124 memory breakpoints inserted by GDB. */
125
126 static int show_memory_breakpoints = 0;
127
128 /* These globals control whether GDB attempts to perform these
129 operations; they are useful for targets that need to prevent
130 inadvertent disruption, such as in non-stop mode. */
131
132 bool may_write_registers = true;
133
134 bool may_write_memory = true;
135
136 bool may_insert_breakpoints = true;
137
138 bool may_insert_tracepoints = true;
139
140 bool may_insert_fast_tracepoints = true;
141
142 bool may_stop = true;
143
144 /* Non-zero if we want to see trace of target level stuff. */
145
146 static unsigned int targetdebug = 0;
147
148 static void
149 set_targetdebug (const char *args, int from_tty, struct cmd_list_element *c)
150 {
151 if (targetdebug)
152 current_inferior ()->push_target (the_debug_target);
153 else
154 current_inferior ()->unpush_target (the_debug_target);
155 }
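/* Illustrative note (the CLI spelling is an assumption): this hook runs
   when the user changes the target debug setting, e.g.

     (gdb) set debug target 1

   which pushes THE_DEBUG_TARGET onto the current inferior's target
   stack so that every target method call is logged; setting it back
   to 0 unpushes it again.  */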
156
157 static void
158 show_targetdebug (struct ui_file *file, int from_tty,
159 struct cmd_list_element *c, const char *value)
160 {
161 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
162 }
163
164 int
165 target_has_memory ()
166 {
167 for (target_ops *t = current_inferior ()->top_target ();
168 t != NULL;
169 t = t->beneath ())
170 if (t->has_memory ())
171 return 1;
172
173 return 0;
174 }
175
176 int
177 target_has_stack ()
178 {
179 for (target_ops *t = current_inferior ()->top_target ();
180 t != NULL;
181 t = t->beneath ())
182 if (t->has_stack ())
183 return 1;
184
185 return 0;
186 }
187
188 int
189 target_has_registers ()
190 {
191 for (target_ops *t = current_inferior ()->top_target ();
192 t != NULL;
193 t = t->beneath ())
194 if (t->has_registers ())
195 return 1;
196
197 return 0;
198 }
199
200 bool
201 target_has_execution (inferior *inf)
202 {
203 if (inf == nullptr)
204 inf = current_inferior ();
205
206 for (target_ops *t = inf->top_target ();
207 t != nullptr;
208 t = inf->find_target_beneath (t))
209 if (t->has_execution (inf))
210 return true;
211
212 return false;
213 }
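/* Sketch of the lookup pattern used above (illustrative, relying on the
   strata enum from target.h): each inferior keeps at most one target
   per stratum, ordered roughly

     dummy_stratum < file_stratum < process_stratum
                   < thread_stratum < record_stratum < arch_stratum

   and the target_has_* queries walk from the top target down through
   beneath () until some layer answers.  A core-file or exec target,
   for example, can contribute memory without contributing execution.  */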
214
215 const char *
216 target_shortname ()
217 {
218 return current_inferior ()->top_target ()->shortname ();
219 }
220
221 /* See target.h. */
222
223 bool
224 target_attach_no_wait ()
225 {
226 return current_inferior ()->top_target ()->attach_no_wait ();
227 }
228
229 /* See target.h. */
230
231 void
232 target_post_attach (int pid)
233 {
234 return current_inferior ()->top_target ()->post_attach (pid);
235 }
236
237 /* See target.h. */
238
239 void
240 target_prepare_to_store (regcache *regcache)
241 {
242 return current_inferior ()->top_target ()->prepare_to_store (regcache);
243 }
244
245 /* See target.h. */
246
247 bool
248 target_supports_enable_disable_tracepoint ()
249 {
250 target_ops *target = current_inferior ()->top_target ();
251
252 return target->supports_enable_disable_tracepoint ();
253 }
254
255 bool
256 target_supports_string_tracing ()
257 {
258 return current_inferior ()->top_target ()->supports_string_tracing ();
259 }
260
261 /* See target.h. */
262
263 bool
264 target_supports_evaluation_of_breakpoint_conditions ()
265 {
266 target_ops *target = current_inferior ()->top_target ();
267
268 return target->supports_evaluation_of_breakpoint_conditions ();
269 }
270
271 /* See target.h. */
272
273 bool
274 target_supports_dumpcore ()
275 {
276 return current_inferior ()->top_target ()->supports_dumpcore ();
277 }
278
279 /* See target.h. */
280
281 void
282 target_dumpcore (const char *filename)
283 {
284 return current_inferior ()->top_target ()->dumpcore (filename);
285 }
286
287 /* See target.h. */
288
289 bool
290 target_can_run_breakpoint_commands ()
291 {
292 return current_inferior ()->top_target ()->can_run_breakpoint_commands ();
293 }
294
295 /* See target.h. */
296
297 void
298 target_files_info ()
299 {
300 return current_inferior ()->top_target ()->files_info ();
301 }
302
303 /* See target.h. */
304
305 void
306 target_post_startup_inferior (ptid_t ptid)
307 {
308 return current_inferior ()->top_target ()->post_startup_inferior (ptid);
309 }
310
311 /* See target.h. */
312
313 int
314 target_insert_fork_catchpoint (int pid)
315 {
316 return current_inferior ()->top_target ()->insert_fork_catchpoint (pid);
317 }
318
319 /* See target.h. */
320
321 int
322 target_remove_fork_catchpoint (int pid)
323 {
324 return current_inferior ()->top_target ()->remove_fork_catchpoint (pid);
325 }
326
327 /* See target.h. */
328
329 int
330 target_insert_vfork_catchpoint (int pid)
331 {
332 return current_inferior ()->top_target ()->insert_vfork_catchpoint (pid);
333 }
334
335 /* See target.h. */
336
337 int
338 target_remove_vfork_catchpoint (int pid)
339 {
340 return current_inferior ()->top_target ()->remove_vfork_catchpoint (pid);
341 }
342
343 /* See target.h. */
344
345 int
346 target_insert_exec_catchpoint (int pid)
347 {
348 return current_inferior ()->top_target ()->insert_exec_catchpoint (pid);
349 }
350
351 /* See target.h. */
352
353 int
354 target_remove_exec_catchpoint (int pid)
355 {
356 return current_inferior ()->top_target ()->remove_exec_catchpoint (pid);
357 }
358
359 /* See target.h. */
360
361 int
362 target_set_syscall_catchpoint (int pid, bool needed, int any_count,
363 gdb::array_view<const int> syscall_counts)
364 {
365 target_ops *target = current_inferior ()->top_target ();
366
367 return target->set_syscall_catchpoint (pid, needed, any_count,
368 syscall_counts);
369 }
370
371 /* See target.h. */
372
373 void
374 target_rcmd (const char *command, struct ui_file *outbuf)
375 {
376 return current_inferior ()->top_target ()->rcmd (command, outbuf);
377 }
378
379 /* See target.h. */
380
381 bool
382 target_can_lock_scheduler ()
383 {
384 target_ops *target = current_inferior ()->top_target ();
385
386 return (target->get_thread_control_capabilities () & tc_schedlock) != 0;
387 }
388
389 /* See target.h. */
390
391 bool
392 target_can_async_p ()
393 {
394 return current_inferior ()->top_target ()->can_async_p ();
395 }
396
397 /* See target.h. */
398
399 bool
400 target_is_async_p ()
401 {
402 return current_inferior ()->top_target ()->is_async_p ();
403 }
404
405 exec_direction_kind
406 target_execution_direction ()
407 {
408 return current_inferior ()->top_target ()->execution_direction ();
409 }
410
411 /* See target.h. */
412
413 const char *
414 target_extra_thread_info (thread_info *tp)
415 {
416 return current_inferior ()->top_target ()->extra_thread_info (tp);
417 }
418
419 /* See target.h. */
420
421 char *
422 target_pid_to_exec_file (int pid)
423 {
424 return current_inferior ()->top_target ()->pid_to_exec_file (pid);
425 }
426
427 /* See target.h. */
428
429 gdbarch *
430 target_thread_architecture (ptid_t ptid)
431 {
432 return current_inferior ()->top_target ()->thread_architecture (ptid);
433 }
434
435 /* See target.h. */
436
437 int
438 target_find_memory_regions (find_memory_region_ftype func, void *data)
439 {
440 return current_inferior ()->top_target ()->find_memory_regions (func, data);
441 }
442
443 /* See target.h. */
444
445 gdb::unique_xmalloc_ptr<char>
446 target_make_corefile_notes (bfd *bfd, int *size_p)
447 {
448 return current_inferior ()->top_target ()->make_corefile_notes (bfd, size_p);
449 }
450
451 gdb_byte *
452 target_get_bookmark (const char *args, int from_tty)
453 {
454 return current_inferior ()->top_target ()->get_bookmark (args, from_tty);
455 }
456
457 void
458 target_goto_bookmark (const gdb_byte *arg, int from_tty)
459 {
460 return current_inferior ()->top_target ()->goto_bookmark (arg, from_tty);
461 }
462
463 /* See target.h. */
464
465 bool
466 target_stopped_by_watchpoint ()
467 {
468 return current_inferior ()->top_target ()->stopped_by_watchpoint ();
469 }
470
471 /* See target.h. */
472
473 bool
474 target_stopped_by_sw_breakpoint ()
475 {
476 return current_inferior ()->top_target ()->stopped_by_sw_breakpoint ();
477 }
478
479 bool
480 target_supports_stopped_by_sw_breakpoint ()
481 {
482 target_ops *target = current_inferior ()->top_target ();
483
484 return target->supports_stopped_by_sw_breakpoint ();
485 }
486
487 bool
488 target_stopped_by_hw_breakpoint ()
489 {
490 return current_inferior ()->top_target ()->stopped_by_hw_breakpoint ();
491 }
492
493 bool
494 target_supports_stopped_by_hw_breakpoint ()
495 {
496 target_ops *target = current_inferior ()->top_target ();
497
498 return target->supports_stopped_by_hw_breakpoint ();
499 }
500
501 /* See target.h. */
502
503 bool
504 target_have_steppable_watchpoint ()
505 {
506 return current_inferior ()->top_target ()->have_steppable_watchpoint ();
507 }
508
509 /* See target.h. */
510
511 int
512 target_can_use_hardware_watchpoint (bptype type, int cnt, int othertype)
513 {
514 target_ops *target = current_inferior ()->top_target ();
515
516 return target->can_use_hw_breakpoint (type, cnt, othertype);
517 }
518
519 /* See target.h. */
520
521 int
522 target_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
523 {
524 target_ops *target = current_inferior ()->top_target ();
525
526 return target->region_ok_for_hw_watchpoint (addr, len);
527 }
528
529
530 int
531 target_can_do_single_step ()
532 {
533 return current_inferior ()->top_target ()->can_do_single_step ();
534 }
535
536 /* See target.h. */
537
538 int
539 target_insert_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
540 expression *cond)
541 {
542 target_ops *target = current_inferior ()->top_target ();
543
544 return target->insert_watchpoint (addr, len, type, cond);
545 }
546
547 /* See target.h. */
548
549 int
550 target_remove_watchpoint (CORE_ADDR addr, int len, target_hw_bp_type type,
551 expression *cond)
552 {
553 target_ops *target = current_inferior ()->top_target ();
554
555 return target->remove_watchpoint (addr, len, type, cond);
556 }
557
558 /* See target.h. */
559
560 int
561 target_insert_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
562 {
563 target_ops *target = current_inferior ()->top_target ();
564
565 return target->insert_hw_breakpoint (gdbarch, bp_tgt);
566 }
567
568 /* See target.h. */
569
570 int
571 target_remove_hw_breakpoint (gdbarch *gdbarch, bp_target_info *bp_tgt)
572 {
573 target_ops *target = current_inferior ()->top_target ();
574
575 return target->remove_hw_breakpoint (gdbarch, bp_tgt);
576 }
577
578 /* See target.h. */
579
580 bool
581 target_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int type,
582 expression *cond)
583 {
584 target_ops *target = current_inferior ()->top_target ();
585
586 return target->can_accel_watchpoint_condition (addr, len, type, cond);
587 }
588
589 /* See target.h. */
590
591 bool
592 target_can_execute_reverse ()
593 {
594 return current_inferior ()->top_target ()->can_execute_reverse ();
595 }
596
597 ptid_t
598 target_get_ada_task_ptid (long lwp, long tid)
599 {
600 return current_inferior ()->top_target ()->get_ada_task_ptid (lwp, tid);
601 }
602
603 bool
604 target_filesystem_is_local ()
605 {
606 return current_inferior ()->top_target ()->filesystem_is_local ();
607 }
608
609 void
610 target_trace_init ()
611 {
612 return current_inferior ()->top_target ()->trace_init ();
613 }
614
615 void
616 target_download_tracepoint (bp_location *location)
617 {
618 return current_inferior ()->top_target ()->download_tracepoint (location);
619 }
620
621 bool
622 target_can_download_tracepoint ()
623 {
624 return current_inferior ()->top_target ()->can_download_tracepoint ();
625 }
626
627 void
628 target_download_trace_state_variable (const trace_state_variable &tsv)
629 {
630 target_ops *target = current_inferior ()->top_target ();
631
632 return target->download_trace_state_variable (tsv);
633 }
634
635 void
636 target_enable_tracepoint (bp_location *loc)
637 {
638 return current_inferior ()->top_target ()->enable_tracepoint (loc);
639 }
640
641 void
642 target_disable_tracepoint (bp_location *loc)
643 {
644 return current_inferior ()->top_target ()->disable_tracepoint (loc);
645 }
646
647 void
648 target_trace_start ()
649 {
650 return current_inferior ()->top_target ()->trace_start ();
651 }
652
653 void
654 target_trace_set_readonly_regions ()
655 {
656 return current_inferior ()->top_target ()->trace_set_readonly_regions ();
657 }
658
659 int
660 target_get_trace_status (trace_status *ts)
661 {
662 return current_inferior ()->top_target ()->get_trace_status (ts);
663 }
664
665 void
666 target_get_tracepoint_status (breakpoint *tp, uploaded_tp *utp)
667 {
668 return current_inferior ()->top_target ()->get_tracepoint_status (tp, utp);
669 }
670
671 void
672 target_trace_stop ()
673 {
674 return current_inferior ()->top_target ()->trace_stop ();
675 }
676
677 int
678 target_trace_find (trace_find_type type, int num,
679 CORE_ADDR addr1, CORE_ADDR addr2, int *tpp)
680 {
681 target_ops *target = current_inferior ()->top_target ();
682
683 return target->trace_find (type, num, addr1, addr2, tpp);
684 }
685
686 bool
687 target_get_trace_state_variable_value (int tsv, LONGEST *val)
688 {
689 target_ops *target = current_inferior ()->top_target ();
690
691 return target->get_trace_state_variable_value (tsv, val);
692 }
693
694 int
695 target_save_trace_data (const char *filename)
696 {
697 return current_inferior ()->top_target ()->save_trace_data (filename);
698 }
699
700 int
701 target_upload_tracepoints (uploaded_tp **utpp)
702 {
703 return current_inferior ()->top_target ()->upload_tracepoints (utpp);
704 }
705
706 int
707 target_upload_trace_state_variables (uploaded_tsv **utsvp)
708 {
709 target_ops *target = current_inferior ()->top_target ();
710
711 return target->upload_trace_state_variables (utsvp);
712 }
713
714 LONGEST
715 target_get_raw_trace_data (gdb_byte *buf, ULONGEST offset, LONGEST len)
716 {
717 target_ops *target = current_inferior ()->top_target ();
718
719 return target->get_raw_trace_data (buf, offset, len);
720 }
721
722 int
723 target_get_min_fast_tracepoint_insn_len ()
724 {
725 target_ops *target = current_inferior ()->top_target ();
726
727 return target->get_min_fast_tracepoint_insn_len ();
728 }
729
730 void
731 target_set_disconnected_tracing (int val)
732 {
733 return current_inferior ()->top_target ()->set_disconnected_tracing (val);
734 }
735
736 void
737 target_set_circular_trace_buffer (int val)
738 {
739 return current_inferior ()->top_target ()->set_circular_trace_buffer (val);
740 }
741
742 void
743 target_set_trace_buffer_size (LONGEST val)
744 {
745 return current_inferior ()->top_target ()->set_trace_buffer_size (val);
746 }
747
748 bool
749 target_set_trace_notes (const char *user, const char *notes,
750 const char *stopnotes)
751 {
752 target_ops *target = current_inferior ()->top_target ();
753
754 return target->set_trace_notes (user, notes, stopnotes);
755 }
756
757 bool
758 target_get_tib_address (ptid_t ptid, CORE_ADDR *addr)
759 {
760 return current_inferior ()->top_target ()->get_tib_address (ptid, addr);
761 }
762
763 void
764 target_set_permissions ()
765 {
766 return current_inferior ()->top_target ()->set_permissions ();
767 }
768
769 bool
770 target_static_tracepoint_marker_at (CORE_ADDR addr,
771 static_tracepoint_marker *marker)
772 {
773 target_ops *target = current_inferior ()->top_target ();
774
775 return target->static_tracepoint_marker_at (addr, marker);
776 }
777
778 std::vector<static_tracepoint_marker>
779 target_static_tracepoint_markers_by_strid (const char *marker_id)
780 {
781 target_ops *target = current_inferior ()->top_target ();
782
783 return target->static_tracepoint_markers_by_strid (marker_id);
784 }
785
786 traceframe_info_up
787 target_traceframe_info ()
788 {
789 return current_inferior ()->top_target ()->traceframe_info ();
790 }
791
792 bool
793 target_use_agent (bool use)
794 {
795 return current_inferior ()->top_target ()->use_agent (use);
796 }
797
798 bool
799 target_can_use_agent ()
800 {
801 return current_inferior ()->top_target ()->can_use_agent ();
802 }
803
804 bool
805 target_augmented_libraries_svr4_read ()
806 {
807 return current_inferior ()->top_target ()->augmented_libraries_svr4_read ();
808 }
809
810 bool
811 target_supports_memory_tagging ()
812 {
813 return current_inferior ()->top_target ()->supports_memory_tagging ();
814 }
815
816 bool
817 target_fetch_memtags (CORE_ADDR address, size_t len, gdb::byte_vector &tags,
818 int type)
819 {
820 return current_inferior ()->top_target ()->fetch_memtags (address, len, tags, type);
821 }
822
823 bool
824 target_store_memtags (CORE_ADDR address, size_t len,
825 const gdb::byte_vector &tags, int type)
826 {
827 return current_inferior ()->top_target ()->store_memtags (address, len, tags, type);
828 }
829
830 void
831 target_log_command (const char *p)
832 {
833 return current_inferior ()->top_target ()->log_command (p);
834 }
835
836 /* This is used to implement the various target commands. */
837
838 static void
839 open_target (const char *args, int from_tty, struct cmd_list_element *command)
840 {
841 auto *ti = static_cast<target_info *> (command->context ());
842 target_open_ftype *func = target_factories[ti];
843
844 if (targetdebug)
845 fprintf_unfiltered (gdb_stdlog, "-> %s->open (...)\n",
846 ti->shortname);
847
848 func (args, from_tty);
849
850 if (targetdebug)
851 fprintf_unfiltered (gdb_stdlog, "<- %s->open (%s, %d)\n",
852 ti->shortname, args, from_tty);
853 }
854
855 /* See target.h. */
856
857 void
858 add_target (const target_info &t, target_open_ftype *func,
859 completer_ftype *completer)
860 {
861 struct cmd_list_element *c;
862
863 auto &func_slot = target_factories[&t];
864 if (func_slot != nullptr)
865 internal_error (__FILE__, __LINE__,
866 _("target already added (\"%s\")."), t.shortname);
867 func_slot = func;
868
869 if (targetlist == NULL)
870 add_basic_prefix_cmd ("target", class_run, _("\
871 Connect to a target machine or process.\n\
872 The first argument is the type or protocol of the target machine.\n\
873 Remaining arguments are interpreted by the target protocol. For more\n\
874 information on the arguments for a particular protocol, type\n\
875 `help target ' followed by the protocol name."),
876 &targetlist, 0, &cmdlist);
877 c = add_cmd (t.shortname, no_class, t.doc, &targetlist);
878 c->set_context ((void *) &t);
879 set_cmd_sfunc (c, open_target);
880 if (completer != NULL)
881 set_cmd_completer (c, completer);
882 }
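/* Registration sketch (illustrative; the "foo" target is hypothetical):
   a target typically registers itself from its _initialize_* routine,
   roughly as

     static const target_info foo_target_info = {
       "foo",				// shortname, gives "target foo"
       N_("Foo remote thing"),		// longname
       N_("Connect to a foo system.")	// doc
     };

     static void
     foo_target_open (const char *args, int from_tty)
     {
       ...
     }

     void
     _initialize_foo ()
     {
       add_target (foo_target_info, foo_target_open);
     }

   after which "target foo ARGS" reaches foo_target_open through
   open_target and the target_factories map above.  */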
883
884 /* See target.h. */
885
886 void
887 add_deprecated_target_alias (const target_info &tinfo, const char *alias)
888 {
889 struct cmd_list_element *c;
890 char *alt;
891
892 /* If we used add_alias_cmd here, we would not get the deprecated warning;
893 see PR cli/15104. */
894 c = add_cmd (alias, no_class, tinfo.doc, &targetlist);
895 set_cmd_sfunc (c, open_target);
896 c->set_context ((void *) &tinfo);
897 alt = xstrprintf ("target %s", tinfo.shortname);
898 deprecate_cmd (c, alt);
899 }
900
901 /* Stub functions */
902
903 void
904 target_kill (void)
905 {
906 current_inferior ()->top_target ()->kill ();
907 }
908
909 void
910 target_load (const char *arg, int from_tty)
911 {
912 target_dcache_invalidate ();
913 current_inferior ()->top_target ()->load (arg, from_tty);
914 }
915
916 /* Define it. */
917
918 target_terminal_state target_terminal::m_terminal_state
919 = target_terminal_state::is_ours;
920
921 /* See target/target.h. */
922
923 void
924 target_terminal::init (void)
925 {
926 current_inferior ()->top_target ()->terminal_init ();
927
928 m_terminal_state = target_terminal_state::is_ours;
929 }
930
931 /* See target/target.h. */
932
933 void
934 target_terminal::inferior (void)
935 {
936 struct ui *ui = current_ui;
937
938 /* A background resume (``run&'') should leave GDB in control of the
939 terminal. */
940 if (ui->prompt_state != PROMPT_BLOCKED)
941 return;
942
943 /* Since we always run the inferior in the main console (unless "set
944 inferior-tty" is in effect), when some UI other than the main one
945 calls target_terminal::inferior, then we leave the main UI's
946 terminal settings as is. */
947 if (ui != main_ui)
948 return;
949
950 /* If GDB is resuming the inferior in the foreground, install
951 inferior's terminal modes. */
952
953 struct inferior *inf = current_inferior ();
954
955 if (inf->terminal_state != target_terminal_state::is_inferior)
956 {
957 current_inferior ()->top_target ()->terminal_inferior ();
958 inf->terminal_state = target_terminal_state::is_inferior;
959 }
960
961 m_terminal_state = target_terminal_state::is_inferior;
962
963 /* If the user hit C-c before, pretend that it was hit right
964 here. */
965 if (check_quit_flag ())
966 target_pass_ctrlc ();
967 }
968
969 /* See target/target.h. */
970
971 void
972 target_terminal::restore_inferior (void)
973 {
974 struct ui *ui = current_ui;
975
976 /* See target_terminal::inferior(). */
977 if (ui->prompt_state != PROMPT_BLOCKED || ui != main_ui)
978 return;
979
980 /* Restore the terminal settings of inferiors that were in the
981 foreground but are now ours_for_output due to a temporary
982 target_terminal::ours_for_output() call. */
983
984 {
985 scoped_restore_current_inferior restore_inferior;
986
987 for (::inferior *inf : all_inferiors ())
988 {
989 if (inf->terminal_state == target_terminal_state::is_ours_for_output)
990 {
991 set_current_inferior (inf);
992 current_inferior ()->top_target ()->terminal_inferior ();
993 inf->terminal_state = target_terminal_state::is_inferior;
994 }
995 }
996 }
997
998 m_terminal_state = target_terminal_state::is_inferior;
999
1000 /* If the user hit C-c before, pretend that it was hit right
1001 here. */
1002 if (check_quit_flag ())
1003 target_pass_ctrlc ();
1004 }
1005
1006 /* Switch terminal state to DESIRED_STATE, either is_ours, or
1007 is_ours_for_output. */
1008
1009 static void
1010 target_terminal_is_ours_kind (target_terminal_state desired_state)
1011 {
1012 scoped_restore_current_inferior restore_inferior;
1013
1014 /* Must do this in two passes. First, have all inferiors save the
1015 current terminal settings. Then, after all inferiors have had a
1016 chance to safely save the terminal settings, restore GDB's
1017 terminal settings. */
1018
1019 for (inferior *inf : all_inferiors ())
1020 {
1021 if (inf->terminal_state == target_terminal_state::is_inferior)
1022 {
1023 set_current_inferior (inf);
1024 current_inferior ()->top_target ()->terminal_save_inferior ();
1025 }
1026 }
1027
1028 for (inferior *inf : all_inferiors ())
1029 {
1030 /* Note we don't check is_inferior here like above because we
1031 need to handle 'is_ours_for_output -> is_ours' too. Careful
1032 to never transition from 'is_ours' to 'is_ours_for_output',
1033 though. */
1034 if (inf->terminal_state != target_terminal_state::is_ours
1035 && inf->terminal_state != desired_state)
1036 {
1037 set_current_inferior (inf);
1038 if (desired_state == target_terminal_state::is_ours)
1039 current_inferior ()->top_target ()->terminal_ours ();
1040 else if (desired_state == target_terminal_state::is_ours_for_output)
1041 current_inferior ()->top_target ()->terminal_ours_for_output ();
1042 else
1043 gdb_assert_not_reached ("unhandled desired state");
1044 inf->terminal_state = desired_state;
1045 }
1046 }
1047 }
1048
1049 /* See target/target.h. */
1050
1051 void
1052 target_terminal::ours ()
1053 {
1054 struct ui *ui = current_ui;
1055
1056 /* See target_terminal::inferior. */
1057 if (ui != main_ui)
1058 return;
1059
1060 if (m_terminal_state == target_terminal_state::is_ours)
1061 return;
1062
1063 target_terminal_is_ours_kind (target_terminal_state::is_ours);
1064 m_terminal_state = target_terminal_state::is_ours;
1065 }
1066
1067 /* See target/target.h. */
1068
1069 void
1070 target_terminal::ours_for_output ()
1071 {
1072 struct ui *ui = current_ui;
1073
1074 /* See target_terminal::inferior. */
1075 if (ui != main_ui)
1076 return;
1077
1078 if (!target_terminal::is_inferior ())
1079 return;
1080
1081 target_terminal_is_ours_kind (target_terminal_state::is_ours_for_output);
1082 target_terminal::m_terminal_state = target_terminal_state::is_ours_for_output;
1083 }
1084
1085 /* See target/target.h. */
1086
1087 void
1088 target_terminal::info (const char *arg, int from_tty)
1089 {
1090 current_inferior ()->top_target ()->terminal_info (arg, from_tty);
1091 }
1092
1093 /* See target.h. */
1094
1095 bool
1096 target_supports_terminal_ours (void)
1097 {
1098 /* The current top target is the target at the top of the target
1099 stack of the current inferior. While normally there's always an
1100 inferior, we must check for nullptr here because we can get here
1101 very early during startup, before the initial inferior is first
1102 created. */
1103 inferior *inf = current_inferior ();
1104
1105 if (inf == nullptr)
1106 return false;
1107 return inf->top_target ()->supports_terminal_ours ();
1108 }
1109
1110 static void
1111 tcomplain (void)
1112 {
1113 error (_("You can't do that when your target is `%s'"),
1114 current_inferior ()->top_target ()->shortname ());
1115 }
1116
1117 void
1118 noprocess (void)
1119 {
1120 error (_("You can't do that without a process to debug."));
1121 }
1122
1123 static void
1124 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
1125 {
1126 printf_unfiltered (_("No saved terminal information.\n"));
1127 }
1128
1129 /* A default implementation for the to_get_ada_task_ptid target method.
1130
1131 This function builds the PTID by using both LWP and TID as part of
1132 the PTID lwp and tid elements. The pid used is the pid of the
1133 inferior_ptid. */
1134
1135 static ptid_t
1136 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
1137 {
1138 return ptid_t (inferior_ptid.pid (), lwp, tid);
1139 }
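/* For instance (illustrative numbers): with inferior_ptid's pid 1234
   and an Ada task whose runtime reports LWP 5678 and TID 0, the
   default method above yields ptid_t (1234, 5678, 0).  */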
1140
1141 static enum exec_direction_kind
1142 default_execution_direction (struct target_ops *self)
1143 {
1144 if (!target_can_execute_reverse ())
1145 return EXEC_FORWARD;
1146 else if (!target_can_async_p ())
1147 return EXEC_FORWARD;
1148 else
1149 gdb_assert_not_reached ("\
1150 to_execution_direction must be implemented for reverse async");
1151 }
1152
1153 /* See target.h. */
1154
1155 void
1156 decref_target (target_ops *t)
1157 {
1158 t->decref ();
1159 if (t->refcount () == 0)
1160 {
1161 if (t->stratum () == process_stratum)
1162 connection_list_remove (as_process_stratum_target (t));
1163 target_close (t);
1164 }
1165 }
1166
1167 /* See target.h. */
1168
1169 void
1170 target_stack::push (target_ops *t)
1171 {
1172 t->incref ();
1173
1174 strata stratum = t->stratum ();
1175
1176 if (stratum == process_stratum)
1177 connection_list_add (as_process_stratum_target (t));
1178
1179 /* If there's already a target at this stratum, remove it. */
1180
1181 if (m_stack[stratum] != NULL)
1182 unpush (m_stack[stratum]);
1183
1184 /* Now add the new one. */
1185 m_stack[stratum] = t;
1186
1187 if (m_top < stratum)
1188 m_top = stratum;
1189 }
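/* Example (illustrative): pushing a second process_stratum target, say
   the remote target while a native target is already installed, first
   unpushes the existing one, so each stratum holds at most one target
   and M_TOP always names the highest occupied stratum.  */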
1190
1191 /* See target.h. */
1192
1193 bool
1194 target_stack::unpush (target_ops *t)
1195 {
1196 gdb_assert (t != NULL);
1197
1198 strata stratum = t->stratum ();
1199
1200 if (stratum == dummy_stratum)
1201 internal_error (__FILE__, __LINE__,
1202 _("Attempt to unpush the dummy target"));
1203
1204 /* Look for the specified target. Note that a target can only occur
1205 once in the target stack. */
1206
1207 if (m_stack[stratum] != t)
1208 {
1209 /* If T wasn't pushed, quit. Only open targets should be
1210 closed. */
1211 return false;
1212 }
1213
1214 /* Unchain the target. */
1215 m_stack[stratum] = NULL;
1216
1217 if (m_top == stratum)
1218 m_top = this->find_beneath (t)->stratum ();
1219
1220 /* Finally close the target, if there are no inferiors
1221 referencing this target still. Note we do this after unchaining,
1222 so any target method calls from within the target_close
1223 implementation don't end up in T anymore. Do leave the target
1224 open if there are other inferiors referencing this target
1225 still. */
1226 decref_target (t);
1227
1228 return true;
1229 }
1230
1231 /* Unpush TARGET and assert that it worked. */
1232
1233 static void
1234 unpush_target_and_assert (struct target_ops *target)
1235 {
1236 if (!current_inferior ()->unpush_target (target))
1237 {
1238 fprintf_unfiltered (gdb_stderr,
1239 "pop_all_targets couldn't find target %s\n",
1240 target->shortname ());
1241 internal_error (__FILE__, __LINE__,
1242 _("failed internal consistency check"));
1243 }
1244 }
1245
1246 void
1247 pop_all_targets_above (enum strata above_stratum)
1248 {
1249 while ((int) (current_inferior ()->top_target ()->stratum ())
1250 > (int) above_stratum)
1251 unpush_target_and_assert (current_inferior ()->top_target ());
1252 }
1253
1254 /* See target.h. */
1255
1256 void
1257 pop_all_targets_at_and_above (enum strata stratum)
1258 {
1259 while ((int) (current_inferior ()->top_target ()->stratum ())
1260 >= (int) stratum)
1261 unpush_target_and_assert (current_inferior ()->top_target ());
1262 }
1263
1264 void
1265 pop_all_targets (void)
1266 {
1267 pop_all_targets_above (dummy_stratum);
1268 }
1269
1270 void
1271 target_unpusher::operator() (struct target_ops *ops) const
1272 {
1273 current_inferior ()->unpush_target (ops);
1274 }
1275
1276 /* Default implementation of to_get_thread_local_address. */
1277
1278 static void
1279 generic_tls_error (void)
1280 {
1281 throw_error (TLS_GENERIC_ERROR,
1282 _("Cannot find thread-local variables on this target"));
1283 }
1284
1285 /* Using the objfile specified in OBJFILE, find the address for the
1286 current thread's thread-local storage with offset OFFSET. */
1287 CORE_ADDR
1288 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1289 {
1290 volatile CORE_ADDR addr = 0;
1291 struct target_ops *target = current_inferior ()->top_target ();
1292 struct gdbarch *gdbarch = target_gdbarch ();
1293
1294 if (gdbarch_fetch_tls_load_module_address_p (gdbarch))
1295 {
1296 ptid_t ptid = inferior_ptid;
1297
1298 try
1299 {
1300 CORE_ADDR lm_addr;
1301
1302 /* Fetch the load module address for this objfile. */
1303 lm_addr = gdbarch_fetch_tls_load_module_address (gdbarch,
1304 objfile);
1305
1306 if (gdbarch_get_thread_local_address_p (gdbarch))
1307 addr = gdbarch_get_thread_local_address (gdbarch, ptid, lm_addr,
1308 offset);
1309 else
1310 addr = target->get_thread_local_address (ptid, lm_addr, offset);
1311 }
1312 /* If an error occurred, print TLS related messages here. Otherwise,
1313 throw the error to some higher catcher. */
1314 catch (const gdb_exception &ex)
1315 {
1316 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1317
1318 switch (ex.error)
1319 {
1320 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1321 error (_("Cannot find thread-local variables "
1322 "in this thread library."));
1323 break;
1324 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1325 if (objfile_is_library)
1326 error (_("Cannot find shared library `%s' in dynamic"
1327 " linker's load module list"), objfile_name (objfile));
1328 else
1329 error (_("Cannot find executable file `%s' in dynamic"
1330 " linker's load module list"), objfile_name (objfile));
1331 break;
1332 case TLS_NOT_ALLOCATED_YET_ERROR:
1333 if (objfile_is_library)
1334 error (_("The inferior has not yet allocated storage for"
1335 " thread-local variables in\n"
1336 "the shared library `%s'\n"
1337 "for %s"),
1338 objfile_name (objfile),
1339 target_pid_to_str (ptid).c_str ());
1340 else
1341 error (_("The inferior has not yet allocated storage for"
1342 " thread-local variables in\n"
1343 "the executable `%s'\n"
1344 "for %s"),
1345 objfile_name (objfile),
1346 target_pid_to_str (ptid).c_str ());
1347 break;
1348 case TLS_GENERIC_ERROR:
1349 if (objfile_is_library)
1350 error (_("Cannot find thread-local storage for %s, "
1351 "shared library %s:\n%s"),
1352 target_pid_to_str (ptid).c_str (),
1353 objfile_name (objfile), ex.what ());
1354 else
1355 error (_("Cannot find thread-local storage for %s, "
1356 "executable file %s:\n%s"),
1357 target_pid_to_str (ptid).c_str (),
1358 objfile_name (objfile), ex.what ());
1359 break;
1360 default:
1361 throw;
1362 break;
1363 }
1364 }
1365 }
1366 else
1367 error (_("Cannot find thread-local variables on this target"));
1368
1369 return addr;
1370 }
1371
1372 const char *
1373 target_xfer_status_to_string (enum target_xfer_status status)
1374 {
1375 #define CASE(X) case X: return #X
1376 switch (status)
1377 {
1378 CASE(TARGET_XFER_E_IO);
1379 CASE(TARGET_XFER_UNAVAILABLE);
1380 default:
1381 return "<unknown>";
1382 }
1383 #undef CASE
1384 };
1385
1386
1387 /* See target.h. */
1388
1389 gdb::unique_xmalloc_ptr<char>
1390 target_read_string (CORE_ADDR memaddr, int len, int *bytes_read)
1391 {
1392 gdb::unique_xmalloc_ptr<gdb_byte> buffer;
1393
1394 int ignore;
1395 if (bytes_read == nullptr)
1396 bytes_read = &ignore;
1397
1398 /* Note that the endian-ness does not matter here. */
1399 int errcode = read_string (memaddr, -1, 1, len, BFD_ENDIAN_LITTLE,
1400 &buffer, bytes_read);
1401 if (errcode != 0)
1402 return {};
1403
1404 return gdb::unique_xmalloc_ptr<char> ((char *) buffer.release ());
1405 }
1406
1407 const target_section_table *
1408 target_get_section_table (struct target_ops *target)
1409 {
1410 return target->get_section_table ();
1411 }
1412
1413 /* Find a section containing ADDR. */
1414
1415 const struct target_section *
1416 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1417 {
1418 const target_section_table *table = target_get_section_table (target);
1419
1420 if (table == NULL)
1421 return NULL;
1422
1423 for (const target_section &secp : *table)
1424 {
1425 if (addr >= secp.addr && addr < secp.endaddr)
1426 return &secp;
1427 }
1428 return NULL;
1429 }
1430
1431 /* See target.h. */
1432
1433 const target_section_table *
1434 default_get_section_table ()
1435 {
1436 return &current_program_space->target_sections ();
1437 }
1438
1439 /* Helper for the memory xfer routines. Checks the attributes of the
1440 memory region of MEMADDR against the read or write being attempted.
1441 If the access is permitted returns true, otherwise returns false.
1442 REGION_P is an optional output parameter. If not-NULL, it is
1443 filled with a pointer to the memory region of MEMADDR. REG_LEN
1444 returns LEN trimmed to the end of the region. This is how much the
1445 caller can continue requesting, if the access is permitted. A
1446 single xfer request must not straddle memory region boundaries. */
1447
1448 static int
1449 memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
1450 ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
1451 struct mem_region **region_p)
1452 {
1453 struct mem_region *region;
1454
1455 region = lookup_mem_region (memaddr);
1456
1457 if (region_p != NULL)
1458 *region_p = region;
1459
1460 switch (region->attrib.mode)
1461 {
1462 case MEM_RO:
1463 if (writebuf != NULL)
1464 return 0;
1465 break;
1466
1467 case MEM_WO:
1468 if (readbuf != NULL)
1469 return 0;
1470 break;
1471
1472 case MEM_FLASH:
1473 /* We only support writing to flash during "load" for now. */
1474 if (writebuf != NULL)
1475 error (_("Writing to flash memory forbidden in this context"));
1476 break;
1477
1478 case MEM_NONE:
1479 return 0;
1480 }
1481
1482 /* region->hi == 0 means there's no upper bound. */
1483 if (memaddr + len < region->hi || region->hi == 0)
1484 *reg_len = len;
1485 else
1486 *reg_len = region->hi - memaddr;
1487
1488 return 1;
1489 }
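/* Worked example (illustrative addresses): with a user-defined region
   covering [0x1000, 0x2000) whose mode is MEM_RO, any write request
   inside it is refused here (the function returns 0).  A read starting
   at 0x1ff0 with LEN 0x20 is permitted, but *REG_LEN is trimmed to
   0x10 so the request stops at the region boundary; the caller then
   issues the remainder as a separate transfer.  */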
1490
1491 /* Read memory from more than one valid target. A core file, for
1492 instance, could have some of memory but delegate other bits to
1493 the target below it. So, we must manually try all targets. */
1494
1495 enum target_xfer_status
1496 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1497 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1498 ULONGEST *xfered_len)
1499 {
1500 enum target_xfer_status res;
1501
1502 do
1503 {
1504 res = ops->xfer_partial (TARGET_OBJECT_MEMORY, NULL,
1505 readbuf, writebuf, memaddr, len,
1506 xfered_len);
1507 if (res == TARGET_XFER_OK)
1508 break;
1509
1510 /* Stop if the target reports that the memory is not available. */
1511 if (res == TARGET_XFER_UNAVAILABLE)
1512 break;
1513
1514 /* Don't continue past targets which have all the memory.
1515 At one time, this code was necessary to read data from
1516 executables / shared libraries when data for the requested
1517 addresses weren't available in the core file. But now the
1518 core target handles this case itself. */
1519 if (ops->has_all_memory ())
1520 break;
1521
1522 ops = ops->beneath ();
1523 }
1524 while (ops != NULL);
1525
1526 /* The cache works at the raw memory level. Make sure the cache
1527 gets updated with raw contents no matter what kind of memory
1528 object was originally being written. Note we do write-through
1529 first, so that if it fails, we don't write to the cache contents
1530 that never made it to the target. */
1531 if (writebuf != NULL
1532 && inferior_ptid != null_ptid
1533 && target_dcache_init_p ()
1534 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
1535 {
1536 DCACHE *dcache = target_dcache_get ();
1537
1538 /* Note that writing to an area of memory which wasn't present
1539 in the cache doesn't cause it to be loaded in. */
1540 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
1541 }
1542
1543 return res;
1544 }
1545
1546 /* Perform a partial memory transfer.
1547 For docs see target.h, to_xfer_partial. */
1548
1549 static enum target_xfer_status
1550 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1551 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1552 ULONGEST len, ULONGEST *xfered_len)
1553 {
1554 enum target_xfer_status res;
1555 ULONGEST reg_len;
1556 struct mem_region *region;
1557 struct inferior *inf;
1558
1559 /* For accesses to unmapped overlay sections, read directly from
1560 files. Must do this first, as MEMADDR may need adjustment. */
1561 if (readbuf != NULL && overlay_debugging)
1562 {
1563 struct obj_section *section = find_pc_overlay (memaddr);
1564
1565 if (pc_in_unmapped_range (memaddr, section))
1566 {
1567 const target_section_table *table = target_get_section_table (ops);
1568 const char *section_name = section->the_bfd_section->name;
1569
1570 memaddr = overlay_mapped_address (memaddr, section);
1571
1572 auto match_cb = [=] (const struct target_section *s)
1573 {
1574 return (strcmp (section_name, s->the_bfd_section->name) == 0);
1575 };
1576
1577 return section_table_xfer_memory_partial (readbuf, writebuf,
1578 memaddr, len, xfered_len,
1579 *table, match_cb);
1580 }
1581 }
1582
1583 /* Try the executable files, if "trust-readonly-sections" is set. */
1584 if (readbuf != NULL && trust_readonly)
1585 {
1586 const struct target_section *secp
1587 = target_section_by_addr (ops, memaddr);
1588 if (secp != NULL
1589 && (bfd_section_flags (secp->the_bfd_section) & SEC_READONLY))
1590 {
1591 const target_section_table *table = target_get_section_table (ops);
1592 return section_table_xfer_memory_partial (readbuf, writebuf,
1593 memaddr, len, xfered_len,
1594 *table);
1595 }
1596 }
1597
1598 /* Try GDB's internal data cache. */
1599
1600 if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, &reg_len,
1601 &region))
1602 return TARGET_XFER_E_IO;
1603
1604 if (inferior_ptid != null_ptid)
1605 inf = current_inferior ();
1606 else
1607 inf = NULL;
1608
1609 if (inf != NULL
1610 && readbuf != NULL
1611 /* The dcache reads whole cache lines; that doesn't play well
1612 with reading from a trace buffer, because reading outside of
1613 the collected memory range fails. */
1614 && get_traceframe_number () == -1
1615 && (region->attrib.cache
1616 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1617 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1618 {
1619 DCACHE *dcache = target_dcache_get_or_init ();
1620
1621 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1622 reg_len, xfered_len);
1623 }
1624
1625 /* If none of those methods found the memory we wanted, fall back
1626 to a target partial transfer. Normally a single call to
1627 to_xfer_partial is enough; if it doesn't recognize an object
1628 it will call the to_xfer_partial of the next target down.
1629 But for memory this won't do. Memory is the only target
1630 object which can be read from more than one valid target.
1631 A core file, for instance, could have some of memory but
1632 delegate other bits to the target below it. So, we must
1633 manually try all targets. */
1634
1635 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1636 xfered_len);
1637
1638 /* If we still haven't got anything, return the last error. We
1639 give up. */
1640 return res;
1641 }
1642
1643 /* Perform a partial memory transfer. For docs see target.h,
1644 to_xfer_partial. */
1645
1646 static enum target_xfer_status
1647 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1648 gdb_byte *readbuf, const gdb_byte *writebuf,
1649 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1650 {
1651 enum target_xfer_status res;
1652
1653 /* Zero length requests are ok and require no work. */
1654 if (len == 0)
1655 return TARGET_XFER_EOF;
1656
1657 memaddr = address_significant (target_gdbarch (), memaddr);
1658
1659 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1660 breakpoint insns, thus hiding out from higher layers whether
1661 there are software breakpoints inserted in the code stream. */
1662 if (readbuf != NULL)
1663 {
1664 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1665 xfered_len);
1666
1667 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1668 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1669 }
1670 else
1671 {
1672 /* A large write request is likely to be partially satisfied
1673 by memory_xfer_partial_1. We will continually malloc
1674 and free a copy of the entire write request for breakpoint
1675 shadow handling even though we only end up writing a small
1676 subset of it. Cap writes to a limit specified by the target
1677 to mitigate this. */
1678 len = std::min (ops->get_memory_xfer_limit (), len);
1679
1680 gdb::byte_vector buf (writebuf, writebuf + len);
1681 breakpoint_xfer_memory (NULL, buf.data (), writebuf, memaddr, len);
1682 res = memory_xfer_partial_1 (ops, object, NULL, buf.data (), memaddr, len,
1683 xfered_len);
1684 }
1685
1686 return res;
1687 }
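/* Example (illustrative): if a software breakpoint is planted at some
   address in the requested range, a read through this path hands back
   the original instruction bytes, because breakpoint_xfer_memory
   substitutes the saved shadow contents into READBUF; writes are
   adjusted the same way so the planted trap instruction is not
   clobbered.  The TARGET_OBJECT_RAW_MEMORY path in target_xfer_partial
   below bypasses this.  */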
1688
1689 scoped_restore_tmpl<int>
1690 make_scoped_restore_show_memory_breakpoints (int show)
1691 {
1692 return make_scoped_restore (&show_memory_breakpoints, show);
1693 }
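/* Usage sketch (illustrative; assumes the scoped_restore convenience
   typedef from gdbsupport):

     scoped_restore restore_memory
       = make_scoped_restore_show_memory_breakpoints (1);
     target_read_memory (addr, buf, len);

   reads the memory as the target really holds it, including any
   planted breakpoint instructions, and the previous setting comes
   back automatically when the scope is left.  */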
1694
1695 /* For docs see target.h, to_xfer_partial. */
1696
1697 enum target_xfer_status
1698 target_xfer_partial (struct target_ops *ops,
1699 enum target_object object, const char *annex,
1700 gdb_byte *readbuf, const gdb_byte *writebuf,
1701 ULONGEST offset, ULONGEST len,
1702 ULONGEST *xfered_len)
1703 {
1704 enum target_xfer_status retval;
1705
1706 /* Transfer is done when LEN is zero. */
1707 if (len == 0)
1708 return TARGET_XFER_EOF;
1709
1710 if (writebuf && !may_write_memory)
1711 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1712 core_addr_to_string_nz (offset), plongest (len));
1713
1714 *xfered_len = 0;
1715
1716 /* If this is a memory transfer, let the memory-specific code
1717 have a look at it instead. Memory transfers are more
1718 complicated. */
1719 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1720 || object == TARGET_OBJECT_CODE_MEMORY)
1721 retval = memory_xfer_partial (ops, object, readbuf,
1722 writebuf, offset, len, xfered_len);
1723 else if (object == TARGET_OBJECT_RAW_MEMORY)
1724 {
1725 /* Skip/avoid accessing the target if the memory region
1726 attributes block the access. Check this here instead of in
1727 raw_memory_xfer_partial as otherwise we'd end up checking
1728 this twice in the case of the memory_xfer_partial path is
1729 taken; once before checking the dcache, and another in the
1730 tail call to raw_memory_xfer_partial. */
1731 if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
1732 NULL))
1733 return TARGET_XFER_E_IO;
1734
1735 /* Request the normal memory object from other layers. */
1736 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1737 xfered_len);
1738 }
1739 else
1740 retval = ops->xfer_partial (object, annex, readbuf,
1741 writebuf, offset, len, xfered_len);
1742
1743 if (targetdebug)
1744 {
1745 const unsigned char *myaddr = NULL;
1746
1747 fprintf_unfiltered (gdb_stdlog,
1748 "%s:target_xfer_partial "
1749 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1750 ops->shortname (),
1751 (int) object,
1752 (annex ? annex : "(null)"),
1753 host_address_to_string (readbuf),
1754 host_address_to_string (writebuf),
1755 core_addr_to_string_nz (offset),
1756 pulongest (len), retval,
1757 pulongest (*xfered_len));
1758
1759 if (readbuf)
1760 myaddr = readbuf;
1761 if (writebuf)
1762 myaddr = writebuf;
1763 if (retval == TARGET_XFER_OK && myaddr != NULL)
1764 {
1765 int i;
1766
1767 fputs_unfiltered (", bytes =", gdb_stdlog);
1768 for (i = 0; i < *xfered_len; i++)
1769 {
1770 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1771 {
1772 if (targetdebug < 2 && i > 0)
1773 {
1774 fprintf_unfiltered (gdb_stdlog, " ...");
1775 break;
1776 }
1777 fprintf_unfiltered (gdb_stdlog, "\n");
1778 }
1779
1780 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1781 }
1782 }
1783
1784 fputc_unfiltered ('\n', gdb_stdlog);
1785 }
1786
1787 /* Check implementations of to_xfer_partial update *XFERED_LEN
1788 properly. Do assertion after printing debug messages, so that we
1789 can find more clues on assertion failure from debugging messages. */
1790 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1791 gdb_assert (*xfered_len > 0);
1792
1793 return retval;
1794 }
1795
1796 /* Read LEN bytes of target memory at address MEMADDR, placing the
1797 results in GDB's memory at MYADDR. Returns either 0 for success or
1798 -1 if any error occurs.
1799
1800 If an error occurs, no guarantee is made about the contents of the data at
1801 MYADDR. In particular, the caller should not depend upon partial reads
1802 filling the buffer with good data. There is no way for the caller to know
1803 how much good data might have been transferred anyway. Callers that can
1804 deal with partial reads should call target_read (which will retry until
1805 it makes no progress, and then return how much was transferred). */
1806
1807 int
1808 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1809 {
1810 if (target_read (current_inferior ()->top_target (),
1811 TARGET_OBJECT_MEMORY, NULL,
1812 myaddr, memaddr, len) == len)
1813 return 0;
1814 else
1815 return -1;
1816 }
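/* Usage sketch (illustrative): callers treat any nonzero return as a
   complete failure, for instance

     gdb_byte buf[8];
     if (target_read_memory (addr, buf, sizeof buf) != 0)
       memory_error (TARGET_XFER_E_IO, addr);

   whereas callers that can make use of partial data go through
   target_read, as described above.  */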
1817
1818 /* See target/target.h. */
1819
1820 int
1821 target_read_uint32 (CORE_ADDR memaddr, uint32_t *result)
1822 {
1823 gdb_byte buf[4];
1824 int r;
1825
1826 r = target_read_memory (memaddr, buf, sizeof buf);
1827 if (r != 0)
1828 return r;
1829 *result = extract_unsigned_integer (buf, sizeof buf,
1830 gdbarch_byte_order (target_gdbarch ()));
1831 return 0;
1832 }
1833
1834 /* Like target_read_memory, but specify explicitly that this is a read
1835 from the target's raw memory. That is, this read bypasses the
1836 dcache, breakpoint shadowing, etc. */
1837
1838 int
1839 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1840 {
1841 if (target_read (current_inferior ()->top_target (),
1842 TARGET_OBJECT_RAW_MEMORY, NULL,
1843 myaddr, memaddr, len) == len)
1844 return 0;
1845 else
1846 return -1;
1847 }
1848
1849 /* Like target_read_memory, but specify explicitly that this is a read from
1850 the target's stack. This may trigger different cache behavior. */
1851
1852 int
1853 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1854 {
1855 if (target_read (current_inferior ()->top_target (),
1856 TARGET_OBJECT_STACK_MEMORY, NULL,
1857 myaddr, memaddr, len) == len)
1858 return 0;
1859 else
1860 return -1;
1861 }
1862
1863 /* Like target_read_memory, but specify explicitly that this is a read from
1864 the target's code. This may trigger different cache behavior. */
1865
1866 int
1867 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1868 {
1869 if (target_read (current_inferior ()->top_target (),
1870 TARGET_OBJECT_CODE_MEMORY, NULL,
1871 myaddr, memaddr, len) == len)
1872 return 0;
1873 else
1874 return -1;
1875 }
1876
1877 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1878 Returns either 0 for success or -1 if any error occurs. If an
1879 error occurs, no guarantee is made about how much data got written.
1880 Callers that can deal with partial writes should call
1881 target_write. */
1882
1883 int
1884 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1885 {
1886 if (target_write (current_inferior ()->top_target (),
1887 TARGET_OBJECT_MEMORY, NULL,
1888 myaddr, memaddr, len) == len)
1889 return 0;
1890 else
1891 return -1;
1892 }
1893
1894 /* Write LEN bytes from MYADDR to target raw memory at address
1895 MEMADDR. Returns either 0 for success or -1 if any error occurs.
1896 If an error occurs, no guarantee is made about how much data got
1897 written. Callers that can deal with partial writes should call
1898 target_write. */
1899
1900 int
1901 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1902 {
1903 if (target_write (current_inferior ()->top_target (),
1904 TARGET_OBJECT_RAW_MEMORY, NULL,
1905 myaddr, memaddr, len) == len)
1906 return 0;
1907 else
1908 return -1;
1909 }
1910
1911 /* Fetch the target's memory map. */
1912
1913 std::vector<mem_region>
1914 target_memory_map (void)
1915 {
1916 target_ops *target = current_inferior ()->top_target ();
1917 std::vector<mem_region> result = target->memory_map ();
1918 if (result.empty ())
1919 return result;
1920
1921 std::sort (result.begin (), result.end ());
1922
1923 /* Check that regions do not overlap. Simultaneously assign
1924 a numbering for the "mem" commands to use to refer to
1925 each region. */
1926 mem_region *last_one = NULL;
1927 for (size_t ix = 0; ix < result.size (); ix++)
1928 {
1929 mem_region *this_one = &result[ix];
1930 this_one->number = ix;
1931
1932 if (last_one != NULL && last_one->hi > this_one->lo)
1933 {
1934 warning (_("Overlapping regions in memory map: ignoring"));
1935 return std::vector<mem_region> ();
1936 }
1937
1938 last_one = this_one;
1939 }
1940
1941 return result;
1942 }
1943
1944 void
1945 target_flash_erase (ULONGEST address, LONGEST length)
1946 {
1947 current_inferior ()->top_target ()->flash_erase (address, length);
1948 }
1949
1950 void
1951 target_flash_done (void)
1952 {
1953 current_inferior ()->top_target ()->flash_done ();
1954 }
1955
1956 static void
1957 show_trust_readonly (struct ui_file *file, int from_tty,
1958 struct cmd_list_element *c, const char *value)
1959 {
1960 fprintf_filtered (file,
1961 _("Mode for reading from readonly sections is %s.\n"),
1962 value);
1963 }
1964
1965 /* Target vector read/write partial wrapper functions. */
1966
1967 static enum target_xfer_status
1968 target_read_partial (struct target_ops *ops,
1969 enum target_object object,
1970 const char *annex, gdb_byte *buf,
1971 ULONGEST offset, ULONGEST len,
1972 ULONGEST *xfered_len)
1973 {
1974 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1975 xfered_len);
1976 }
1977
1978 static enum target_xfer_status
1979 target_write_partial (struct target_ops *ops,
1980 enum target_object object,
1981 const char *annex, const gdb_byte *buf,
1982 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1983 {
1984 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1985 xfered_len);
1986 }
1987
1988 /* Wrappers to perform the full transfer. */
1989
1990 /* For docs on target_read see target.h. */
1991
1992 LONGEST
1993 target_read (struct target_ops *ops,
1994 enum target_object object,
1995 const char *annex, gdb_byte *buf,
1996 ULONGEST offset, LONGEST len)
1997 {
1998 LONGEST xfered_total = 0;
1999 int unit_size = 1;
2000
2001 /* If we are reading from a memory object, find the length of an addressable
2002 unit for that architecture. */
2003 if (object == TARGET_OBJECT_MEMORY
2004 || object == TARGET_OBJECT_STACK_MEMORY
2005 || object == TARGET_OBJECT_CODE_MEMORY
2006 || object == TARGET_OBJECT_RAW_MEMORY)
2007 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2008
2009 while (xfered_total < len)
2010 {
2011 ULONGEST xfered_partial;
2012 enum target_xfer_status status;
2013
2014 status = target_read_partial (ops, object, annex,
2015 buf + xfered_total * unit_size,
2016 offset + xfered_total, len - xfered_total,
2017 &xfered_partial);
2018
2019 /* Call an observer, notifying them of the xfer progress? */
2020 if (status == TARGET_XFER_EOF)
2021 return xfered_total;
2022 else if (status == TARGET_XFER_OK)
2023 {
2024 xfered_total += xfered_partial;
2025 QUIT;
2026 }
2027 else
2028 return TARGET_XFER_E_IO;
2029
2030 }
2031 return len;
2032 }
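/* Illustrative sketch: callers of target_read generally treat anything
   other than a full-length transfer as an error, e.g.:

     gdb_byte buf[8];

     if (target_read (current_inferior ()->top_target (),
                      TARGET_OBJECT_MEMORY, NULL, buf, addr,
                      sizeof (buf)) != sizeof (buf))
       memory_error (TARGET_XFER_E_IO, addr);

   ADDR above is just a placeholder CORE_ADDR; get_target_memory below
   is a real caller following exactly this pattern (against
   TARGET_OBJECT_RAW_MEMORY).  */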
2033
2034 /* Assuming that the entire [begin, end) range of memory cannot be
2035 read, try to read whatever subrange is possible to read.
2036
2037 The function returns, in RESULT, either zero or one memory block.
2038 If there's a readable subrange at the beginning, it is completely
2039 read and returned. Any further readable subrange will not be read.
Otherwise, if there's a readable subrange at the end, it will be
completely read and returned.  Any readable subranges before it
(that is, not starting at the beginning) are ignored.  In all
other cases -- no readable subrange at all, or readable subranges
that touch neither the beginning nor the end -- nothing is returned.
2045
The purpose of this function is to handle a read across a boundary
of accessible memory in the case when no memory map is available.
The above restrictions are fine for that case, but would give
incorrect results if the memory is 'patchy'.  However, supporting
'patchy' memory would require trying to read every single byte,
which seems an unacceptable cost.  An explicit memory map is
recommended for that case -- read_memory_robust will then take
care of reading multiple ranges.  */
2054
2055 static void
2056 read_whatever_is_readable (struct target_ops *ops,
2057 const ULONGEST begin, const ULONGEST end,
2058 int unit_size,
2059 std::vector<memory_read_result> *result)
2060 {
2061 ULONGEST current_begin = begin;
2062 ULONGEST current_end = end;
2063 int forward;
2064 ULONGEST xfered_len;
2065
2066 /* If we previously failed to read 1 byte, nothing can be done here. */
2067 if (end - begin <= 1)
2068 return;
2069
2070 gdb::unique_xmalloc_ptr<gdb_byte> buf ((gdb_byte *) xmalloc (end - begin));
2071
/* Check that either the first or the last byte is readable, and give up
if not.  This heuristic is meant to permit reading accessible memory
at the boundary of an accessible region.  */
2075 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2076 buf.get (), begin, 1, &xfered_len) == TARGET_XFER_OK)
2077 {
2078 forward = 1;
2079 ++current_begin;
2080 }
2081 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2082 buf.get () + (end - begin) - 1, end - 1, 1,
2083 &xfered_len) == TARGET_XFER_OK)
2084 {
2085 forward = 0;
2086 --current_end;
2087 }
2088 else
2089 return;
2090
/* The loop invariant is that the range [current_begin, current_end) was
previously found to be unreadable as a whole.

Note the loop condition: once the range is down to a single byte, it
cannot be divided further, so there is no point in trying.  */
2096 while (current_end - current_begin > 1)
2097 {
2098 ULONGEST first_half_begin, first_half_end;
2099 ULONGEST second_half_begin, second_half_end;
2100 LONGEST xfer;
2101 ULONGEST middle = current_begin + (current_end - current_begin) / 2;
2102
2103 if (forward)
2104 {
2105 first_half_begin = current_begin;
2106 first_half_end = middle;
2107 second_half_begin = middle;
2108 second_half_end = current_end;
2109 }
2110 else
2111 {
2112 first_half_begin = middle;
2113 first_half_end = current_end;
2114 second_half_begin = current_begin;
2115 second_half_end = middle;
2116 }
2117
2118 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2119 buf.get () + (first_half_begin - begin) * unit_size,
2120 first_half_begin,
2121 first_half_end - first_half_begin);
2122
2123 if (xfer == first_half_end - first_half_begin)
2124 {
2125 /* This half reads up fine. So, the error must be in the
2126 other half. */
2127 current_begin = second_half_begin;
2128 current_end = second_half_end;
2129 }
2130 else
2131 {
/* This half is not readable.  Because we've tried one byte, we
know some part of this half is actually readable.  Go to the next
iteration to divide again and try to read.

We don't handle the other half, because this function only tries
to read a single readable subrange.  */
2138 current_begin = first_half_begin;
2139 current_end = first_half_end;
2140 }
2141 }
2142
2143 if (forward)
2144 {
2145 /* The [begin, current_begin) range has been read. */
2146 result->emplace_back (begin, current_end, std::move (buf));
2147 }
2148 else
2149 {
2150 /* The [current_end, end) range has been read. */
2151 LONGEST region_len = end - current_end;
2152
2153 gdb::unique_xmalloc_ptr<gdb_byte> data
2154 ((gdb_byte *) xmalloc (region_len * unit_size));
2155 memcpy (data.get (), buf.get () + (current_end - begin) * unit_size,
2156 region_len * unit_size);
2157 result->emplace_back (current_end, end, std::move (data));
2158 }
2159 }
2160
2161 std::vector<memory_read_result>
2162 read_memory_robust (struct target_ops *ops,
2163 const ULONGEST offset, const LONGEST len)
2164 {
2165 std::vector<memory_read_result> result;
2166 int unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2167
2168 LONGEST xfered_total = 0;
2169 while (xfered_total < len)
2170 {
2171 struct mem_region *region = lookup_mem_region (offset + xfered_total);
2172 LONGEST region_len;
2173
/* lookup_mem_region never returns NULL; if no explicit region covers
the address, it supplies a default one.  */
2175 gdb_assert (region);
2176
2177 if (region->hi == 0)
2178 region_len = len - xfered_total;
2179 else
2180 region_len = region->hi - offset;
2181
2182 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2183 {
2184 /* Cannot read this region. Note that we can end up here only
2185 if the region is explicitly marked inaccessible, or
2186 'inaccessible-by-default' is in effect. */
2187 xfered_total += region_len;
2188 }
2189 else
2190 {
2191 LONGEST to_read = std::min (len - xfered_total, region_len);
2192 gdb::unique_xmalloc_ptr<gdb_byte> buffer
2193 ((gdb_byte *) xmalloc (to_read * unit_size));
2194
2195 LONGEST xfered_partial =
2196 target_read (ops, TARGET_OBJECT_MEMORY, NULL, buffer.get (),
2197 offset + xfered_total, to_read);
2198 /* Call an observer, notifying them of the xfer progress? */
2199 if (xfered_partial <= 0)
2200 {
2201 /* Got an error reading full chunk. See if maybe we can read
2202 some subrange. */
2203 read_whatever_is_readable (ops, offset + xfered_total,
2204 offset + xfered_total + to_read,
2205 unit_size, &result);
2206 xfered_total += to_read;
2207 }
2208 else
2209 {
2210 result.emplace_back (offset + xfered_total,
2211 offset + xfered_total + xfered_partial,
2212 std::move (buffer));
2213 xfered_total += xfered_partial;
2214 }
2215 QUIT;
2216 }
2217 }
2218
2219 return result;
2220 }
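/* Illustrative sketch: a caller of read_memory_robust receives zero or
   more disjoint blocks and must handle the gaps between them itself.
   Assuming the begin/end/data members that memory_read_result is
   declared with in target.h:

     std::vector<memory_read_result> blocks
       = read_memory_robust (current_inferior ()->top_target (),
                             start, length);

     for (const memory_read_result &block : blocks)
       process_bytes (block.begin, block.end, block.data.get ());

   Here process_bytes is a hypothetical consumer; block.data holds
   (block.end - block.begin) addressable units read from the target,
   starting at address block.begin.  START and LENGTH are placeholders;
   in-tree, the MI "-data-read-memory-bytes" command consumes this
   function in essentially this way.  */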
2221
2222
2223 /* An alternative to target_write with progress callbacks. */
2224
2225 LONGEST
2226 target_write_with_progress (struct target_ops *ops,
2227 enum target_object object,
2228 const char *annex, const gdb_byte *buf,
2229 ULONGEST offset, LONGEST len,
2230 void (*progress) (ULONGEST, void *), void *baton)
2231 {
2232 LONGEST xfered_total = 0;
2233 int unit_size = 1;
2234
2235 /* If we are writing to a memory object, find the length of an addressable
2236 unit for that architecture. */
2237 if (object == TARGET_OBJECT_MEMORY
2238 || object == TARGET_OBJECT_STACK_MEMORY
2239 || object == TARGET_OBJECT_CODE_MEMORY
2240 || object == TARGET_OBJECT_RAW_MEMORY)
2241 unit_size = gdbarch_addressable_memory_unit_size (target_gdbarch ());
2242
2243 /* Give the progress callback a chance to set up. */
2244 if (progress)
2245 (*progress) (0, baton);
2246
2247 while (xfered_total < len)
2248 {
2249 ULONGEST xfered_partial;
2250 enum target_xfer_status status;
2251
2252 status = target_write_partial (ops, object, annex,
2253 buf + xfered_total * unit_size,
2254 offset + xfered_total, len - xfered_total,
2255 &xfered_partial);
2256
2257 if (status != TARGET_XFER_OK)
2258 return status == TARGET_XFER_EOF ? xfered_total : TARGET_XFER_E_IO;
2259
2260 if (progress)
2261 (*progress) (xfered_partial, baton);
2262
2263 xfered_total += xfered_partial;
2264 QUIT;
2265 }
2266 return len;
2267 }
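/* Illustrative sketch: the PROGRESS callback receives the size of each
   partial transfer as it completes (plus one initial call with 0 so it
   can set itself up), together with the caller-supplied BATON.  A
   hypothetical caller keeping a running total might look like:

     static void
     note_progress (ULONGEST written, void *baton)
     {
       ULONGEST *total = (ULONGEST *) baton;

       *total += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL, buf,
                                 offset, len, note_progress, &total);

   TARGET_OBJECT_FLASH is just one plausible object here; the "load"
   command's writer in symfile.c is the main in-tree user of this
   progress interface.  */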
2268
2269 /* For docs on target_write see target.h. */
2270
2271 LONGEST
2272 target_write (struct target_ops *ops,
2273 enum target_object object,
2274 const char *annex, const gdb_byte *buf,
2275 ULONGEST offset, LONGEST len)
2276 {
2277 return target_write_with_progress (ops, object, annex, buf, offset, len,
2278 NULL, NULL);
2279 }
2280
2281 /* Help for target_read_alloc and target_read_stralloc. See their comments
2282 for details. */
2283
2284 template <typename T>
2285 gdb::optional<gdb::def_vector<T>>
2286 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2287 const char *annex)
2288 {
2289 gdb::def_vector<T> buf;
2290 size_t buf_pos = 0;
2291 const int chunk = 4096;
2292
2293 /* This function does not have a length parameter; it reads the
entire OBJECT.  Also, it doesn't support objects fetched partly
2295 from one target and partly from another (in a different stratum,
2296 e.g. a core file and an executable). Both reasons make it
2297 unsuitable for reading memory. */
2298 gdb_assert (object != TARGET_OBJECT_MEMORY);
2299
2300 /* Start by reading up to 4K at a time. The target will throttle
2301 this number down if necessary. */
2302 while (1)
2303 {
2304 ULONGEST xfered_len;
2305 enum target_xfer_status status;
2306
2307 buf.resize (buf_pos + chunk);
2308
2309 status = target_read_partial (ops, object, annex,
2310 (gdb_byte *) &buf[buf_pos],
2311 buf_pos, chunk,
2312 &xfered_len);
2313
2314 if (status == TARGET_XFER_EOF)
2315 {
2316 /* Read all there was. */
2317 buf.resize (buf_pos);
2318 return buf;
2319 }
2320 else if (status != TARGET_XFER_OK)
2321 {
2322 /* An error occurred. */
2323 return {};
2324 }
2325
2326 buf_pos += xfered_len;
2327
2328 QUIT;
2329 }
2330 }
2331
/* See target.h. */
2333
2334 gdb::optional<gdb::byte_vector>
2335 target_read_alloc (struct target_ops *ops, enum target_object object,
2336 const char *annex)
2337 {
2338 return target_read_alloc_1<gdb_byte> (ops, object, annex);
2339 }
2340
2341 /* See target.h. */
2342
2343 gdb::optional<gdb::char_vector>
2344 target_read_stralloc (struct target_ops *ops, enum target_object object,
2345 const char *annex)
2346 {
2347 gdb::optional<gdb::char_vector> buf
2348 = target_read_alloc_1<char> (ops, object, annex);
2349
2350 if (!buf)
2351 return {};
2352
2353 if (buf->empty () || buf->back () != '\0')
2354 buf->push_back ('\0');
2355
2356 /* Check for embedded NUL bytes; but allow trailing NULs. */
2357 for (auto it = std::find (buf->begin (), buf->end (), '\0');
2358 it != buf->end (); it++)
2359 if (*it != '\0')
2360 {
2361 warning (_("target object %d, annex %s, "
2362 "contained unexpected null characters"),
2363 (int) object, annex ? annex : "(none)");
2364 break;
2365 }
2366
2367 return buf;
2368 }
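/* Illustrative sketch: target_read_stralloc is the right entry point
   for string-like objects such as TARGET_OBJECT_OSDATA, e.g.:

     gdb::optional<gdb::char_vector> text
       = target_read_stralloc (current_inferior ()->top_target (),
                               TARGET_OBJECT_OSDATA, "processes");

     if (text)
       parse_osdata_xml (text->data ());

   The "processes" annex and parse_osdata_xml are placeholders used
   only for this example; target_get_osdata below is the actual
   OS-data reader built on this function.  */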
2369
2370 /* Memory transfer methods. */
2371
2372 void
2373 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2374 LONGEST len)
2375 {
2376 /* This method is used to read from an alternate, non-current
2377 target. This read must bypass the overlay support (as symbols
2378 don't match this target), and GDB's internal cache (wrong cache
2379 for this target). */
2380 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2381 != len)
2382 memory_error (TARGET_XFER_E_IO, addr);
2383 }
2384
2385 ULONGEST
2386 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2387 int len, enum bfd_endian byte_order)
2388 {
2389 gdb_byte buf[sizeof (ULONGEST)];
2390
2391 gdb_assert (len <= sizeof (buf));
2392 get_target_memory (ops, addr, buf, len);
2393 return extract_unsigned_integer (buf, len, byte_order);
2394 }
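/* Illustrative sketch: these helpers are meant for peeking at an
   alternate, non-current target.  For example, reading a 4-byte
   little-endian word:

     ULONGEST value
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_LITTLE);

   In practice the byte order argument comes from the relevant gdbarch
   (gdbarch_byte_order) rather than being hard-coded as above; OPS and
   ADDR are placeholders.  */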
2395
2396 /* See target.h. */
2397
2398 int
2399 target_insert_breakpoint (struct gdbarch *gdbarch,
2400 struct bp_target_info *bp_tgt)
2401 {
2402 if (!may_insert_breakpoints)
2403 {
2404 warning (_("May not insert breakpoints"));
2405 return 1;
2406 }
2407
2408 target_ops *target = current_inferior ()->top_target ();
2409
2410 return target->insert_breakpoint (gdbarch, bp_tgt);
2411 }
2412
2413 /* See target.h. */
2414
2415 int
2416 target_remove_breakpoint (struct gdbarch *gdbarch,
2417 struct bp_target_info *bp_tgt,
2418 enum remove_bp_reason reason)
2419 {
2420 /* This is kind of a weird case to handle, but the permission might
2421 have been changed after breakpoints were inserted - in which case
2422 we should just take the user literally and assume that any
2423 breakpoints should be left in place. */
2424 if (!may_insert_breakpoints)
2425 {
2426 warning (_("May not remove breakpoints"));
2427 return 1;
2428 }
2429
2430 target_ops *target = current_inferior ()->top_target ();
2431
2432 return target->remove_breakpoint (gdbarch, bp_tgt, reason);
2433 }
2434
2435 static void
2436 info_target_command (const char *args, int from_tty)
2437 {
2438 int has_all_mem = 0;
2439
2440 if (current_program_space->symfile_object_file != NULL)
2441 {
2442 objfile *objf = current_program_space->symfile_object_file;
2443 printf_unfiltered (_("Symbols from \"%s\".\n"),
2444 objfile_name (objf));
2445 }
2446
2447 for (target_ops *t = current_inferior ()->top_target ();
2448 t != NULL;
2449 t = t->beneath ())
2450 {
2451 if (!t->has_memory ())
2452 continue;
2453
2454 if ((int) (t->stratum ()) <= (int) dummy_stratum)
2455 continue;
2456 if (has_all_mem)
2457 printf_unfiltered (_("\tWhile running this, "
2458 "GDB does not access memory from...\n"));
2459 printf_unfiltered ("%s:\n", t->longname ());
2460 t->files_info ();
2461 has_all_mem = t->has_all_memory ();
2462 }
2463 }
2464
2465 /* This function is called before any new inferior is created, e.g.
2466 by running a program, attaching, or connecting to a target.
2467 It cleans up any state from previous invocations which might
2468 change between runs. This is a subset of what target_preopen
2469 resets (things which might change between targets). */
2470
2471 void
2472 target_pre_inferior (int from_tty)
2473 {
2474 /* Clear out solib state. Otherwise the solib state of the previous
2475 inferior might have survived and is entirely wrong for the new
2476 target. This has been observed on GNU/Linux using glibc 2.3. How
2477 to reproduce:
2478
2479 bash$ ./foo&
2480 [1] 4711
2481 bash$ ./foo&
[2] 4712
2483 bash$ gdb ./foo
2484 [...]
2485 (gdb) attach 4711
2486 (gdb) detach
2487 (gdb) attach 4712
2488 Cannot access memory at address 0xdeadbeef
2489 */
2490
2491 /* In some OSs, the shared library list is the same/global/shared
2492 across inferiors. If code is shared between processes, so are
2493 memory regions and features. */
2494 if (!gdbarch_has_global_solist (target_gdbarch ()))
2495 {
2496 no_shared_libraries (NULL, from_tty);
2497
2498 invalidate_target_mem_regions ();
2499
2500 target_clear_description ();
2501 }
2502
2503 /* attach_flag may be set if the previous process associated with
2504 the inferior was attached to. */
2505 current_inferior ()->attach_flag = 0;
2506
2507 current_inferior ()->highest_thread_num = 0;
2508
2509 agent_capability_invalidate ();
2510 }
2511
2512 /* This is to be called by the open routine before it does
2513 anything. */
2514
2515 void
2516 target_preopen (int from_tty)
2517 {
2518 dont_repeat ();
2519
2520 if (current_inferior ()->pid != 0)
2521 {
2522 if (!from_tty
2523 || !target_has_execution ()
2524 || query (_("A program is being debugged already. Kill it? ")))
2525 {
2526 /* Core inferiors actually should be detached, not
2527 killed. */
2528 if (target_has_execution ())
2529 target_kill ();
2530 else
2531 target_detach (current_inferior (), 0);
2532 }
2533 else
2534 error (_("Program not killed."));
2535 }
2536
2537 /* Calling target_kill may remove the target from the stack. But if
2538 it doesn't (which seems like a win for UDI), remove it now. */
2539 /* Leave the exec target, though. The user may be switching from a
2540 live process to a core of the same program. */
2541 pop_all_targets_above (file_stratum);
2542
2543 target_pre_inferior (from_tty);
2544 }
2545
2546 /* See target.h. */
2547
2548 void
2549 target_detach (inferior *inf, int from_tty)
2550 {
2551 /* After we have detached, we will clear the register cache for this inferior
2552 by calling registers_changed_ptid. We must save the pid_ptid before
2553 detaching, as the target detach method will clear inf->pid. */
2554 ptid_t save_pid_ptid = ptid_t (inf->pid);
2555
/* As long as some to_detach implementations rely on the current_inferior
(either directly, or indirectly, e.g. through target_gdbarch or by
reading memory), INF needs to be the current inferior.  Once that
requirement no longer holds, this assertion can be removed.  */
2561 gdb_assert (inf == current_inferior ());
2562
2563 prepare_for_detach ();
2564
2565 /* Hold a strong reference because detaching may unpush the
2566 target. */
2567 auto proc_target_ref = target_ops_ref::new_reference (inf->process_target ());
2568
2569 current_inferior ()->top_target ()->detach (inf, from_tty);
2570
2571 process_stratum_target *proc_target
2572 = as_process_stratum_target (proc_target_ref.get ());
2573
2574 registers_changed_ptid (proc_target, save_pid_ptid);
2575
2576 /* We have to ensure we have no frame cache left. Normally,
2577 registers_changed_ptid (save_pid_ptid) calls reinit_frame_cache when
2578 inferior_ptid matches save_pid_ptid, but in our case, it does not
2579 call it, as inferior_ptid has been reset. */
2580 reinit_frame_cache ();
2581 }
2582
2583 void
2584 target_disconnect (const char *args, int from_tty)
2585 {
2586 /* If we're in breakpoints-always-inserted mode or if breakpoints
2587 are global across processes, we have to remove them before
2588 disconnecting. */
2589 remove_breakpoints ();
2590
2591 current_inferior ()->top_target ()->disconnect (args, from_tty);
2592 }
2593
2594 /* See target/target.h. */
2595
2596 ptid_t
2597 target_wait (ptid_t ptid, struct target_waitstatus *status,
2598 target_wait_flags options)
2599 {
2600 target_ops *target = current_inferior ()->top_target ();
2601 process_stratum_target *proc_target = current_inferior ()->process_target ();
2602
2603 gdb_assert (!proc_target->commit_resumed_state);
2604
2605 if (!target->can_async_p ())
2606 gdb_assert ((options & TARGET_WNOHANG) == 0);
2607
2608 return target->wait (ptid, status, options);
2609 }
2610
2611 /* See target.h. */
2612
2613 ptid_t
2614 default_target_wait (struct target_ops *ops,
2615 ptid_t ptid, struct target_waitstatus *status,
2616 target_wait_flags options)
2617 {
2618 status->kind = TARGET_WAITKIND_IGNORE;
2619 return minus_one_ptid;
2620 }
2621
2622 std::string
2623 target_pid_to_str (ptid_t ptid)
2624 {
2625 return current_inferior ()->top_target ()->pid_to_str (ptid);
2626 }
2627
2628 const char *
2629 target_thread_name (struct thread_info *info)
2630 {
2631 gdb_assert (info->inf == current_inferior ());
2632
2633 return current_inferior ()->top_target ()->thread_name (info);
2634 }
2635
2636 struct thread_info *
2637 target_thread_handle_to_thread_info (const gdb_byte *thread_handle,
2638 int handle_len,
2639 struct inferior *inf)
2640 {
2641 target_ops *target = current_inferior ()->top_target ();
2642
2643 return target->thread_handle_to_thread_info (thread_handle, handle_len, inf);
2644 }
2645
2646 /* See target.h. */
2647
2648 gdb::byte_vector
2649 target_thread_info_to_thread_handle (struct thread_info *tip)
2650 {
2651 target_ops *target = current_inferior ()->top_target ();
2652
2653 return target->thread_info_to_thread_handle (tip);
2654 }
2655
2656 void
2657 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2658 {
2659 process_stratum_target *curr_target = current_inferior ()->process_target ();
2660 gdb_assert (!curr_target->commit_resumed_state);
2661
2662 target_dcache_invalidate ();
2663
2664 current_inferior ()->top_target ()->resume (ptid, step, signal);
2665
2666 registers_changed_ptid (curr_target, ptid);
/* We only set the internal executing state here.  The user/frontend
running state is set at a higher level.  This also clears the
thread's stop_pc as a side effect.  */
2670 set_executing (curr_target, ptid, true);
2671 clear_inline_frame_state (curr_target, ptid);
2672 }
2673
2674 /* See target.h. */
2675
2676 void
2677 target_commit_resumed ()
2678 {
2679 gdb_assert (current_inferior ()->process_target ()->commit_resumed_state);
2680 current_inferior ()->top_target ()->commit_resumed ();
2681 }
2682
2683 /* See target.h. */
2684
2685 bool
2686 target_has_pending_events ()
2687 {
2688 return current_inferior ()->top_target ()->has_pending_events ();
2689 }
2690
2691 void
2692 target_pass_signals (gdb::array_view<const unsigned char> pass_signals)
2693 {
2694 current_inferior ()->top_target ()->pass_signals (pass_signals);
2695 }
2696
2697 void
2698 target_program_signals (gdb::array_view<const unsigned char> program_signals)
2699 {
2700 current_inferior ()->top_target ()->program_signals (program_signals);
2701 }
2702
2703 static void
2704 default_follow_fork (struct target_ops *self, bool follow_child,
2705 bool detach_fork)
2706 {
2707 /* Some target returned a fork event, but did not know how to follow it. */
2708 internal_error (__FILE__, __LINE__,
2709 _("could not find a target to follow fork"));
2710 }
2711
2712 /* See target.h. */
2713
2714 void
2715 target_follow_fork (bool follow_child, bool detach_fork)
2716 {
2717 target_ops *target = current_inferior ()->top_target ();
2718
2719 return target->follow_fork (follow_child, detach_fork);
2720 }
2721
2722 /* See target.h. */
2723
2724 void
2725 target_follow_exec (inferior *follow_inf, ptid_t ptid,
2726 const char *execd_pathname)
2727 {
2728 current_inferior ()->top_target ()->follow_exec (follow_inf, ptid,
2729 execd_pathname);
2730 }
2731
2732 static void
2733 default_mourn_inferior (struct target_ops *self)
2734 {
2735 internal_error (__FILE__, __LINE__,
_("could not find a target to mourn the inferior"));
2737 }
2738
2739 void
2740 target_mourn_inferior (ptid_t ptid)
2741 {
2742 gdb_assert (ptid.pid () == inferior_ptid.pid ());
2743 current_inferior ()->top_target ()->mourn_inferior ();
2744
2745 /* We no longer need to keep handles on any of the object files.
2746 Make sure to release them to avoid unnecessarily locking any
2747 of them while we're not actually debugging. */
2748 bfd_cache_close_all ();
2749 }
2750
2751 /* Look for a target which can describe architectural features, starting
2752 from TARGET. If we find one, return its description. */
2753
2754 const struct target_desc *
2755 target_read_description (struct target_ops *target)
2756 {
2757 return target->read_description ();
2758 }
2759
2760
2761 /* Default implementation of memory-searching. */
2762
2763 static int
2764 default_search_memory (struct target_ops *self,
2765 CORE_ADDR start_addr, ULONGEST search_space_len,
2766 const gdb_byte *pattern, ULONGEST pattern_len,
2767 CORE_ADDR *found_addrp)
2768 {
2769 auto read_memory = [=] (CORE_ADDR addr, gdb_byte *result, size_t len)
2770 {
2771 return target_read (current_inferior ()->top_target (),
2772 TARGET_OBJECT_MEMORY, NULL,
2773 result, addr, len) == len;
2774 };
2775
2776 /* Start over from the top of the target stack. */
2777 return simple_search_memory (read_memory, start_addr, search_space_len,
2778 pattern, pattern_len, found_addrp);
2779 }
2780
2781 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2782 sequence of bytes in PATTERN with length PATTERN_LEN.
2783
2784 The result is 1 if found, 0 if not found, and -1 if there was an error
2785 requiring halting of the search (e.g. memory read error).
2786 If the pattern is found the address is recorded in FOUND_ADDRP. */
2787
2788 int
2789 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2790 const gdb_byte *pattern, ULONGEST pattern_len,
2791 CORE_ADDR *found_addrp)
2792 {
2793 target_ops *target = current_inferior ()->top_target ();
2794
2795 return target->search_memory (start_addr, search_space_len, pattern,
2796 pattern_len, found_addrp);
2797 }
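/* Illustrative sketch: a caller of target_search_memory checks the
   tri-state result described above, e.g. when looking for an ELF
   magic number:

     static const gdb_byte pattern[] = { 0x7f, 'E', 'L', 'F' };
     CORE_ADDR found;
     int res = target_search_memory (start, space_len,
                                     pattern, sizeof (pattern), &found);

     if (res < 0)
       error (_("memory read failed during search"));
     else if (res == 1)
       printf_filtered ("found at %s\n", paddress (gdbarch, found));

   START, SPACE_LEN and GDBARCH are placeholders; the "find" command in
   findcmd.c is the main in-tree user of this interface.  */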
2798
2799 /* Look through the currently pushed targets. If none of them will
2800 be able to restart the currently running process, issue an error
2801 message. */
2802
2803 void
2804 target_require_runnable (void)
2805 {
2806 for (target_ops *t = current_inferior ()->top_target ();
2807 t != NULL;
2808 t = t->beneath ())
2809 {
2810 /* If this target knows how to create a new program, then
2811 assume we will still be able to after killing the current
2812 one. Either killing and mourning will not pop T, or else
2813 find_default_run_target will find it again. */
2814 if (t->can_create_inferior ())
2815 return;
2816
/* Do not worry about targets at certain strata that cannot
create inferiors.  Assume they will be pushed again if
necessary, and continue to the process_stratum.  */
2820 if (t->stratum () > process_stratum)
2821 continue;
2822
2823 error (_("The \"%s\" target does not support \"run\". "
2824 "Try \"help target\" or \"continue\"."),
2825 t->shortname ());
2826 }
2827
2828 /* This function is only called if the target is running. In that
2829 case there should have been a process_stratum target and it
2830 should either know how to create inferiors, or not... */
2831 internal_error (__FILE__, __LINE__, _("No targets found"));
2832 }
2833
2834 /* Whether GDB is allowed to fall back to the default run target for
2835 "run", "attach", etc. when no target is connected yet. */
2836 static bool auto_connect_native_target = true;
2837
2838 static void
2839 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2840 struct cmd_list_element *c, const char *value)
2841 {
2842 fprintf_filtered (file,
2843 _("Whether GDB may automatically connect to the "
2844 "native target is %s.\n"),
2845 value);
2846 }
2847
2848 /* A pointer to the target that can respond to "run" or "attach".
2849 Native targets are always singletons and instantiated early at GDB
2850 startup. */
2851 static target_ops *the_native_target;
2852
2853 /* See target.h. */
2854
2855 void
2856 set_native_target (target_ops *target)
2857 {
2858 if (the_native_target != NULL)
2859 internal_error (__FILE__, __LINE__,
2860 _("native target already set (\"%s\")."),
2861 the_native_target->longname ());
2862
2863 the_native_target = target;
2864 }
2865
2866 /* See target.h. */
2867
2868 target_ops *
2869 get_native_target ()
2870 {
2871 return the_native_target;
2872 }
2873
2874 /* Look through the list of possible targets for a target that can
2875 execute a run or attach command without any other data. This is
2876 used to locate the default process stratum.
2877
2878 If DO_MESG is not NULL, the result is always valid (error() is
2879 called for errors); else, return NULL on error. */
2880
2881 static struct target_ops *
2882 find_default_run_target (const char *do_mesg)
2883 {
2884 if (auto_connect_native_target && the_native_target != NULL)
2885 return the_native_target;
2886
2887 if (do_mesg != NULL)
2888 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2889 return NULL;
2890 }
2891
2892 /* See target.h. */
2893
2894 struct target_ops *
2895 find_attach_target (void)
2896 {
2897 /* If a target on the current stack can attach, use it. */
2898 for (target_ops *t = current_inferior ()->top_target ();
2899 t != NULL;
2900 t = t->beneath ())
2901 {
2902 if (t->can_attach ())
2903 return t;
2904 }
2905
2906 /* Otherwise, use the default run target for attaching. */
2907 return find_default_run_target ("attach");
2908 }
2909
2910 /* See target.h. */
2911
2912 struct target_ops *
2913 find_run_target (void)
2914 {
2915 /* If a target on the current stack can run, use it. */
2916 for (target_ops *t = current_inferior ()->top_target ();
2917 t != NULL;
2918 t = t->beneath ())
2919 {
2920 if (t->can_create_inferior ())
2921 return t;
2922 }
2923
2924 /* Otherwise, use the default run target. */
2925 return find_default_run_target ("run");
2926 }
2927
2928 bool
2929 target_ops::info_proc (const char *args, enum info_proc_what what)
2930 {
2931 return false;
2932 }
2933
2934 /* Implement the "info proc" command. */
2935
2936 int
2937 target_info_proc (const char *args, enum info_proc_what what)
2938 {
2939 struct target_ops *t;
2940
2941 /* If we're already connected to something that can get us OS
2942 related data, use it. Otherwise, try using the native
2943 target. */
2944 t = find_target_at (process_stratum);
2945 if (t == NULL)
2946 t = find_default_run_target (NULL);
2947
2948 for (; t != NULL; t = t->beneath ())
2949 {
2950 if (t->info_proc (args, what))
2951 {
2952 if (targetdebug)
2953 fprintf_unfiltered (gdb_stdlog,
2954 "target_info_proc (\"%s\", %d)\n", args, what);
2955
2956 return 1;
2957 }
2958 }
2959
2960 return 0;
2961 }
2962
2963 static int
2964 find_default_supports_disable_randomization (struct target_ops *self)
2965 {
2966 struct target_ops *t;
2967
2968 t = find_default_run_target (NULL);
2969 if (t != NULL)
2970 return t->supports_disable_randomization ();
2971 return 0;
2972 }
2973
2974 int
2975 target_supports_disable_randomization (void)
2976 {
2977 return current_inferior ()->top_target ()->supports_disable_randomization ();
2978 }
2979
2980 /* See target/target.h. */
2981
2982 int
2983 target_supports_multi_process (void)
2984 {
2985 return current_inferior ()->top_target ()->supports_multi_process ();
2986 }
2987
2988 /* See target.h. */
2989
2990 gdb::optional<gdb::char_vector>
2991 target_get_osdata (const char *type)
2992 {
2993 struct target_ops *t;
2994
2995 /* If we're already connected to something that can get us OS
2996 related data, use it. Otherwise, try using the native
2997 target. */
2998 t = find_target_at (process_stratum);
2999 if (t == NULL)
3000 t = find_default_run_target ("get OS data");
3001
3002 if (!t)
3003 return {};
3004
3005 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3006 }
3007
3008 /* Determine the current address space of thread PTID. */
3009
3010 struct address_space *
3011 target_thread_address_space (ptid_t ptid)
3012 {
3013 struct address_space *aspace;
3014
3015 aspace = current_inferior ()->top_target ()->thread_address_space (ptid);
3016 gdb_assert (aspace != NULL);
3017
3018 return aspace;
3019 }
3020
3021 /* See target.h. */
3022
3023 target_ops *
3024 target_ops::beneath () const
3025 {
3026 return current_inferior ()->find_target_beneath (this);
3027 }
3028
3029 void
3030 target_ops::close ()
3031 {
3032 }
3033
3034 bool
3035 target_ops::can_attach ()
3036 {
return false;
3038 }
3039
3040 void
3041 target_ops::attach (const char *, int)
3042 {
3043 gdb_assert_not_reached ("target_ops::attach called");
3044 }
3045
3046 bool
3047 target_ops::can_create_inferior ()
3048 {
return false;
3050 }
3051
3052 void
3053 target_ops::create_inferior (const char *, const std::string &,
3054 char **, int)
3055 {
3056 gdb_assert_not_reached ("target_ops::create_inferior called");
3057 }
3058
3059 bool
3060 target_ops::can_run ()
3061 {
3062 return false;
3063 }
3064
3065 int
3066 target_can_run ()
3067 {
3068 for (target_ops *t = current_inferior ()->top_target ();
3069 t != NULL;
3070 t = t->beneath ())
3071 {
3072 if (t->can_run ())
3073 return 1;
3074 }
3075
3076 return 0;
3077 }
3078
3079 /* Target file operations. */
3080
3081 static struct target_ops *
3082 default_fileio_target (void)
3083 {
3084 struct target_ops *t;
3085
3086 /* If we're already connected to something that can perform
3087 file I/O, use it. Otherwise, try using the native target. */
3088 t = find_target_at (process_stratum);
3089 if (t != NULL)
3090 return t;
3091 return find_default_run_target ("file I/O");
3092 }
3093
3094 /* File handle for target file operations. */
3095
3096 struct fileio_fh_t
3097 {
/* The target on which this file is open.  NULL if the target was
closed while the handle was still open.  */
3100 target_ops *target;
3101
3102 /* The file descriptor on the target. */
3103 int target_fd;
3104
3105 /* Check whether this fileio_fh_t represents a closed file. */
3106 bool is_closed ()
3107 {
3108 return target_fd < 0;
3109 }
3110 };
3111
3112 /* Vector of currently open file handles. The value returned by
3113 target_fileio_open and passed as the FD argument to other
3114 target_fileio_* functions is an index into this vector. This
3115 vector's entries are never freed; instead, files are marked as
3116 closed, and the handle becomes available for reuse. */
3117 static std::vector<fileio_fh_t> fileio_fhandles;
3118
3119 /* Index into fileio_fhandles of the lowest handle that might be
3120 closed. This permits handle reuse without searching the whole
3121 list each time a new file is opened. */
3122 static int lowest_closed_fd;
3123
3124 /* See target.h. */
3125
3126 void
3127 fileio_handles_invalidate_target (target_ops *targ)
3128 {
3129 for (fileio_fh_t &fh : fileio_fhandles)
3130 if (fh.target == targ)
3131 fh.target = NULL;
3132 }
3133
3134 /* Acquire a target fileio file descriptor. */
3135
3136 static int
3137 acquire_fileio_fd (target_ops *target, int target_fd)
3138 {
3139 /* Search for closed handles to reuse. */
3140 for (; lowest_closed_fd < fileio_fhandles.size (); lowest_closed_fd++)
3141 {
3142 fileio_fh_t &fh = fileio_fhandles[lowest_closed_fd];
3143
3144 if (fh.is_closed ())
3145 break;
3146 }
3147
3148 /* Push a new handle if no closed handles were found. */
3149 if (lowest_closed_fd == fileio_fhandles.size ())
3150 fileio_fhandles.push_back (fileio_fh_t {target, target_fd});
3151 else
3152 fileio_fhandles[lowest_closed_fd] = {target, target_fd};
3153
3154 /* Should no longer be marked closed. */
3155 gdb_assert (!fileio_fhandles[lowest_closed_fd].is_closed ());
3156
3157 /* Return its index, and start the next lookup at
3158 the next index. */
3159 return lowest_closed_fd++;
3160 }
3161
3162 /* Release a target fileio file descriptor. */
3163
3164 static void
3165 release_fileio_fd (int fd, fileio_fh_t *fh)
3166 {
3167 fh->target_fd = -1;
3168 lowest_closed_fd = std::min (lowest_closed_fd, fd);
3169 }
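/* For example, here is a worked trace of the two helpers above,
   assuming the table starts out empty:

     acquire_fileio_fd (t, 5)   => returns 0, lowest_closed_fd = 1
     acquire_fileio_fd (t, 7)   => returns 1, lowest_closed_fd = 2
     release_fileio_fd (0, fh)  => marks slot 0 closed, lowest_closed_fd = 0
     acquire_fileio_fd (t, 9)   => reuses slot 0, lowest_closed_fd = 1

   so the descriptors handed out to the rest of GDB stay small and
   dense, while the underlying target descriptors (5, 7, 9 above) can
   be anything.  */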
3170
/* Return a pointer to the fileio_fh_t corresponding to FD.  */
3172
3173 static fileio_fh_t *
3174 fileio_fd_to_fh (int fd)
3175 {
3176 return &fileio_fhandles[fd];
3177 }
3178
3179
3180 /* Default implementations of file i/o methods. We don't want these
3181 to delegate automatically, because we need to know which target
3182 supported the method, in order to call it directly from within
3183 pread/pwrite, etc. */
3184
3185 int
3186 target_ops::fileio_open (struct inferior *inf, const char *filename,
3187 int flags, int mode, int warn_if_slow,
3188 int *target_errno)
3189 {
3190 *target_errno = FILEIO_ENOSYS;
3191 return -1;
3192 }
3193
3194 int
3195 target_ops::fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3196 ULONGEST offset, int *target_errno)
3197 {
3198 *target_errno = FILEIO_ENOSYS;
3199 return -1;
3200 }
3201
3202 int
3203 target_ops::fileio_pread (int fd, gdb_byte *read_buf, int len,
3204 ULONGEST offset, int *target_errno)
3205 {
3206 *target_errno = FILEIO_ENOSYS;
3207 return -1;
3208 }
3209
3210 int
3211 target_ops::fileio_fstat (int fd, struct stat *sb, int *target_errno)
3212 {
3213 *target_errno = FILEIO_ENOSYS;
3214 return -1;
3215 }
3216
3217 int
3218 target_ops::fileio_close (int fd, int *target_errno)
3219 {
3220 *target_errno = FILEIO_ENOSYS;
3221 return -1;
3222 }
3223
3224 int
3225 target_ops::fileio_unlink (struct inferior *inf, const char *filename,
3226 int *target_errno)
3227 {
3228 *target_errno = FILEIO_ENOSYS;
3229 return -1;
3230 }
3231
3232 gdb::optional<std::string>
3233 target_ops::fileio_readlink (struct inferior *inf, const char *filename,
3234 int *target_errno)
3235 {
3236 *target_errno = FILEIO_ENOSYS;
3237 return {};
3238 }
3239
3240 /* See target.h. */
3241
3242 int
3243 target_fileio_open (struct inferior *inf, const char *filename,
3244 int flags, int mode, bool warn_if_slow, int *target_errno)
3245 {
3246 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3247 {
3248 int fd = t->fileio_open (inf, filename, flags, mode,
3249 warn_if_slow, target_errno);
3250
3251 if (fd == -1 && *target_errno == FILEIO_ENOSYS)
3252 continue;
3253
3254 if (fd < 0)
3255 fd = -1;
3256 else
3257 fd = acquire_fileio_fd (t, fd);
3258
3259 if (targetdebug)
3260 fprintf_unfiltered (gdb_stdlog,
3261 "target_fileio_open (%d,%s,0x%x,0%o,%d)"
3262 " = %d (%d)\n",
3263 inf == NULL ? 0 : inf->num,
3264 filename, flags, mode,
3265 warn_if_slow, fd,
3266 fd != -1 ? 0 : *target_errno);
3267 return fd;
3268 }
3269
3270 *target_errno = FILEIO_ENOSYS;
3271 return -1;
3272 }
3273
3274 /* See target.h. */
3275
3276 int
3277 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3278 ULONGEST offset, int *target_errno)
3279 {
3280 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3281 int ret = -1;
3282
3283 if (fh->is_closed ())
3284 *target_errno = EBADF;
3285 else if (fh->target == NULL)
3286 *target_errno = EIO;
3287 else
3288 ret = fh->target->fileio_pwrite (fh->target_fd, write_buf,
3289 len, offset, target_errno);
3290
3291 if (targetdebug)
3292 fprintf_unfiltered (gdb_stdlog,
3293 "target_fileio_pwrite (%d,...,%d,%s) "
3294 "= %d (%d)\n",
3295 fd, len, pulongest (offset),
3296 ret, ret != -1 ? 0 : *target_errno);
3297 return ret;
3298 }
3299
3300 /* See target.h. */
3301
3302 int
3303 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3304 ULONGEST offset, int *target_errno)
3305 {
3306 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3307 int ret = -1;
3308
3309 if (fh->is_closed ())
3310 *target_errno = EBADF;
3311 else if (fh->target == NULL)
3312 *target_errno = EIO;
3313 else
3314 ret = fh->target->fileio_pread (fh->target_fd, read_buf,
3315 len, offset, target_errno);
3316
3317 if (targetdebug)
3318 fprintf_unfiltered (gdb_stdlog,
3319 "target_fileio_pread (%d,...,%d,%s) "
3320 "= %d (%d)\n",
3321 fd, len, pulongest (offset),
3322 ret, ret != -1 ? 0 : *target_errno);
3323 return ret;
3324 }
3325
3326 /* See target.h. */
3327
3328 int
3329 target_fileio_fstat (int fd, struct stat *sb, int *target_errno)
3330 {
3331 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3332 int ret = -1;
3333
3334 if (fh->is_closed ())
3335 *target_errno = EBADF;
3336 else if (fh->target == NULL)
3337 *target_errno = EIO;
3338 else
3339 ret = fh->target->fileio_fstat (fh->target_fd, sb, target_errno);
3340
3341 if (targetdebug)
3342 fprintf_unfiltered (gdb_stdlog,
3343 "target_fileio_fstat (%d) = %d (%d)\n",
3344 fd, ret, ret != -1 ? 0 : *target_errno);
3345 return ret;
3346 }
3347
3348 /* See target.h. */
3349
3350 int
3351 target_fileio_close (int fd, int *target_errno)
3352 {
3353 fileio_fh_t *fh = fileio_fd_to_fh (fd);
3354 int ret = -1;
3355
3356 if (fh->is_closed ())
3357 *target_errno = EBADF;
3358 else
3359 {
3360 if (fh->target != NULL)
3361 ret = fh->target->fileio_close (fh->target_fd,
3362 target_errno);
3363 else
3364 ret = 0;
3365 release_fileio_fd (fd, fh);
3366 }
3367
3368 if (targetdebug)
3369 fprintf_unfiltered (gdb_stdlog,
3370 "target_fileio_close (%d) = %d (%d)\n",
3371 fd, ret, ret != -1 ? 0 : *target_errno);
3372 return ret;
3373 }
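/* Illustrative sketch: a typical consumer of the target fileio
   interface opens a file in the inferior's filesystem, reads from it,
   and closes it, checking TARGET_ERRNO at each step, e.g.:

     int target_errno;
     int fd = target_fileio_open (inf, "/proc/version", FILEIO_O_RDONLY,
                                  0, false, &target_errno);

     if (fd != -1)
       {
         gdb_byte buf[128];
         int n = target_fileio_pread (fd, buf, sizeof (buf), 0,
                                      &target_errno);
         if (n > 0)
           consume_bytes (buf, n);
         target_fileio_close (fd, &target_errno);
       }

   Here consume_bytes and "/proc/version" are placeholders;
   target_fileio_read_alloc_1 below is the real in-tree reader built on
   this open/pread/close pattern.  */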
3374
3375 /* See target.h. */
3376
3377 int
3378 target_fileio_unlink (struct inferior *inf, const char *filename,
3379 int *target_errno)
3380 {
3381 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3382 {
3383 int ret = t->fileio_unlink (inf, filename, target_errno);
3384
3385 if (ret == -1 && *target_errno == FILEIO_ENOSYS)
3386 continue;
3387
3388 if (targetdebug)
3389 fprintf_unfiltered (gdb_stdlog,
3390 "target_fileio_unlink (%d,%s)"
3391 " = %d (%d)\n",
3392 inf == NULL ? 0 : inf->num, filename,
3393 ret, ret != -1 ? 0 : *target_errno);
3394 return ret;
3395 }
3396
3397 *target_errno = FILEIO_ENOSYS;
3398 return -1;
3399 }
3400
3401 /* See target.h. */
3402
3403 gdb::optional<std::string>
3404 target_fileio_readlink (struct inferior *inf, const char *filename,
3405 int *target_errno)
3406 {
3407 for (target_ops *t = default_fileio_target (); t != NULL; t = t->beneath ())
3408 {
3409 gdb::optional<std::string> ret
3410 = t->fileio_readlink (inf, filename, target_errno);
3411
3412 if (!ret.has_value () && *target_errno == FILEIO_ENOSYS)
3413 continue;
3414
3415 if (targetdebug)
3416 fprintf_unfiltered (gdb_stdlog,
3417 "target_fileio_readlink (%d,%s)"
3418 " = %s (%d)\n",
3419 inf == NULL ? 0 : inf->num,
3420 filename, ret ? ret->c_str () : "(nil)",
3421 ret ? 0 : *target_errno);
3422 return ret;
3423 }
3424
3425 *target_errno = FILEIO_ENOSYS;
3426 return {};
3427 }
3428
3429 /* Like scoped_fd, but specific to target fileio. */
3430
3431 class scoped_target_fd
3432 {
3433 public:
3434 explicit scoped_target_fd (int fd) noexcept
3435 : m_fd (fd)
3436 {
3437 }
3438
3439 ~scoped_target_fd ()
3440 {
3441 if (m_fd >= 0)
3442 {
3443 int target_errno;
3444
3445 target_fileio_close (m_fd, &target_errno);
3446 }
3447 }
3448
3449 DISABLE_COPY_AND_ASSIGN (scoped_target_fd);
3450
3451 int get () const noexcept
3452 {
3453 return m_fd;
3454 }
3455
3456 private:
3457 int m_fd;
3458 };
3459
3460 /* Read target file FILENAME, in the filesystem as seen by INF. If
3461 INF is NULL, use the filesystem seen by the debugger (GDB or, for
3462 remote targets, the remote stub). Store the result in *BUF_P and
3463 return the size of the transferred data. PADDING additional bytes
3464 are available in *BUF_P. This is a helper function for
3465 target_fileio_read_alloc; see the declaration of that function for
3466 more information. */
3467
3468 static LONGEST
3469 target_fileio_read_alloc_1 (struct inferior *inf, const char *filename,
3470 gdb_byte **buf_p, int padding)
3471 {
3472 size_t buf_alloc, buf_pos;
3473 gdb_byte *buf;
3474 LONGEST n;
3475 int target_errno;
3476
3477 scoped_target_fd fd (target_fileio_open (inf, filename, FILEIO_O_RDONLY,
3478 0700, false, &target_errno));
3479 if (fd.get () == -1)
3480 return -1;
3481
3482 /* Start by reading up to 4K at a time. The target will throttle
3483 this number down if necessary. */
3484 buf_alloc = 4096;
3485 buf = (gdb_byte *) xmalloc (buf_alloc);
3486 buf_pos = 0;
3487 while (1)
3488 {
3489 n = target_fileio_pread (fd.get (), &buf[buf_pos],
3490 buf_alloc - buf_pos - padding, buf_pos,
3491 &target_errno);
3492 if (n < 0)
3493 {
3494 /* An error occurred. */
3495 xfree (buf);
3496 return -1;
3497 }
3498 else if (n == 0)
3499 {
3500 /* Read all there was. */
3501 if (buf_pos == 0)
3502 xfree (buf);
3503 else
3504 *buf_p = buf;
3505 return buf_pos;
3506 }
3507
3508 buf_pos += n;
3509
3510 /* If the buffer is filling up, expand it. */
3511 if (buf_alloc < buf_pos * 2)
3512 {
3513 buf_alloc *= 2;
3514 buf = (gdb_byte *) xrealloc (buf, buf_alloc);
3515 }
3516
3517 QUIT;
3518 }
3519 }
3520
3521 /* See target.h. */
3522
3523 LONGEST
3524 target_fileio_read_alloc (struct inferior *inf, const char *filename,
3525 gdb_byte **buf_p)
3526 {
3527 return target_fileio_read_alloc_1 (inf, filename, buf_p, 0);
3528 }
3529
3530 /* See target.h. */
3531
3532 gdb::unique_xmalloc_ptr<char>
3533 target_fileio_read_stralloc (struct inferior *inf, const char *filename)
3534 {
3535 gdb_byte *buffer;
3536 char *bufstr;
3537 LONGEST i, transferred;
3538
3539 transferred = target_fileio_read_alloc_1 (inf, filename, &buffer, 1);
3540 bufstr = (char *) buffer;
3541
3542 if (transferred < 0)
3543 return gdb::unique_xmalloc_ptr<char> (nullptr);
3544
3545 if (transferred == 0)
3546 return make_unique_xstrdup ("");
3547
3548 bufstr[transferred] = 0;
3549
3550 /* Check for embedded NUL bytes; but allow trailing NULs. */
3551 for (i = strlen (bufstr); i < transferred; i++)
3552 if (bufstr[i] != 0)
3553 {
3554 warning (_("target file %s "
3555 "contained unexpected null characters"),
3556 filename);
3557 break;
3558 }
3559
3560 return gdb::unique_xmalloc_ptr<char> (bufstr);
3561 }
3562
3563
3564 static int
3565 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3566 CORE_ADDR addr, int len)
3567 {
3568 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3569 }
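/* For example, on a target with 64-bit pointers this default accepts
   watched regions of up to 64 / TARGET_CHAR_BIT = 8 bytes, i.e. at
   most one pointer-sized word; anything larger needs a target-specific
   region_ok_for_hw_watchpoint implementation.  */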
3570
3571 static int
3572 default_watchpoint_addr_within_range (struct target_ops *target,
3573 CORE_ADDR addr,
3574 CORE_ADDR start, int length)
3575 {
3576 return addr >= start && addr < start + length;
3577 }
3578
3579 /* See target.h. */
3580
3581 target_ops *
3582 target_stack::find_beneath (const target_ops *t) const
3583 {
3584 /* Look for a non-empty slot at stratum levels beneath T's. */
3585 for (int stratum = t->stratum () - 1; stratum >= 0; --stratum)
3586 if (m_stack[stratum] != NULL)
3587 return m_stack[stratum];
3588
3589 return NULL;
3590 }
3591
3592 /* See target.h. */
3593
3594 struct target_ops *
3595 find_target_at (enum strata stratum)
3596 {
3597 return current_inferior ()->target_at (stratum);
3598 }
3599
3600 \f
3601
/* See target.h. */
3603
3604 void
3605 target_announce_detach (int from_tty)
3606 {
3607 pid_t pid;
3608 const char *exec_file;
3609
3610 if (!from_tty)
3611 return;
3612
3613 exec_file = get_exec_file (0);
3614 if (exec_file == NULL)
3615 exec_file = "";
3616
3617 pid = inferior_ptid.pid ();
3618 printf_unfiltered (_("Detaching from program: %s, %s\n"), exec_file,
3619 target_pid_to_str (ptid_t (pid)).c_str ());
3620 }
3621
3622 /* The inferior process has died. Long live the inferior! */
3623
3624 void
3625 generic_mourn_inferior (void)
3626 {
3627 inferior *inf = current_inferior ();
3628
3629 switch_to_no_thread ();
3630
3631 /* Mark breakpoints uninserted in case something tries to delete a
3632 breakpoint while we delete the inferior's threads (which would
3633 fail, since the inferior is long gone). */
3634 mark_breakpoints_out ();
3635
3636 if (inf->pid != 0)
3637 exit_inferior (inf);
3638
3639 /* Note this wipes step-resume breakpoints, so needs to be done
3640 after exit_inferior, which ends up referencing the step-resume
3641 breakpoints through clear_thread_inferior_resources. */
3642 breakpoint_init_inferior (inf_exited);
3643
3644 registers_changed ();
3645
3646 reopen_exec_file ();
3647 reinit_frame_cache ();
3648
3649 if (deprecated_detach_hook)
3650 deprecated_detach_hook ();
3651 }
3652 \f
/* Convert a normal process ID to a string.  */
3655
3656 std::string
3657 normal_pid_to_str (ptid_t ptid)
3658 {
3659 return string_printf ("process %d", ptid.pid ());
3660 }
3661
3662 static std::string
3663 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
3664 {
3665 return normal_pid_to_str (ptid);
3666 }
3667
3668 /* Error-catcher for target_find_memory_regions. */
3669 static int
3670 dummy_find_memory_regions (struct target_ops *self,
3671 find_memory_region_ftype ignore1, void *ignore2)
3672 {
3673 error (_("Command not implemented for this target."));
3674 return 0;
3675 }
3676
3677 /* Error-catcher for target_make_corefile_notes. */
3678 static gdb::unique_xmalloc_ptr<char>
3679 dummy_make_corefile_notes (struct target_ops *self,
3680 bfd *ignore1, int *ignore2)
3681 {
3682 error (_("Command not implemented for this target."));
3683 return NULL;
3684 }
3685
3686 #include "target-delegates.c"
3687
3688 /* The initial current target, so that there is always a semi-valid
3689 current target. */
3690
3691 static dummy_target the_dummy_target;
3692
3693 /* See target.h. */
3694
3695 target_ops *
3696 get_dummy_target ()
3697 {
3698 return &the_dummy_target;
3699 }
3700
3701 static const target_info dummy_target_info = {
3702 "None",
3703 N_("None"),
3704 ""
3705 };
3706
3707 strata
3708 dummy_target::stratum () const
3709 {
3710 return dummy_stratum;
3711 }
3712
3713 strata
3714 debug_target::stratum () const
3715 {
3716 return debug_stratum;
3717 }
3718
3719 const target_info &
3720 dummy_target::info () const
3721 {
3722 return dummy_target_info;
3723 }
3724
3725 const target_info &
3726 debug_target::info () const
3727 {
3728 return beneath ()->info ();
3729 }
3730
3731 \f
3732
3733 void
3734 target_close (struct target_ops *targ)
3735 {
3736 for (inferior *inf : all_inferiors ())
3737 gdb_assert (!inf->target_is_pushed (targ));
3738
3739 fileio_handles_invalidate_target (targ);
3740
3741 targ->close ();
3742
3743 if (targetdebug)
3744 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3745 }
3746
3747 int
3748 target_thread_alive (ptid_t ptid)
3749 {
3750 return current_inferior ()->top_target ()->thread_alive (ptid);
3751 }
3752
3753 void
3754 target_update_thread_list (void)
3755 {
3756 current_inferior ()->top_target ()->update_thread_list ();
3757 }
3758
3759 void
3760 target_stop (ptid_t ptid)
3761 {
3762 process_stratum_target *proc_target = current_inferior ()->process_target ();
3763
3764 gdb_assert (!proc_target->commit_resumed_state);
3765
3766 if (!may_stop)
3767 {
3768 warning (_("May not interrupt or stop the target, ignoring attempt"));
3769 return;
3770 }
3771
3772 current_inferior ()->top_target ()->stop (ptid);
3773 }
3774
3775 void
3776 target_interrupt ()
3777 {
3778 if (!may_stop)
3779 {
3780 warning (_("May not interrupt or stop the target, ignoring attempt"));
3781 return;
3782 }
3783
3784 current_inferior ()->top_target ()->interrupt ();
3785 }
3786
3787 /* See target.h. */
3788
3789 void
3790 target_pass_ctrlc (void)
3791 {
3792 /* Pass the Ctrl-C to the first target that has a thread
3793 running. */
3794 for (inferior *inf : all_inferiors ())
3795 {
3796 target_ops *proc_target = inf->process_target ();
3797 if (proc_target == NULL)
3798 continue;
3799
3800 for (thread_info *thr : inf->non_exited_threads ())
3801 {
3802 /* A thread can be THREAD_STOPPED and executing, while
3803 running an infcall. */
3804 if (thr->state == THREAD_RUNNING || thr->executing)
3805 {
3806 /* We can get here quite deep in target layers. Avoid
3807 switching thread context or anything that would
3808 communicate with the target (e.g., to fetch
3809 registers), or flushing e.g., the frame cache. We
3810 just switch inferior in order to be able to call
3811 through the target_stack. */
3812 scoped_restore_current_inferior restore_inferior;
3813 set_current_inferior (inf);
3814 current_inferior ()->top_target ()->pass_ctrlc ();
3815 return;
3816 }
3817 }
3818 }
3819 }
3820
3821 /* See target.h. */
3822
3823 void
3824 default_target_pass_ctrlc (struct target_ops *ops)
3825 {
3826 target_interrupt ();
3827 }
3828
3829 /* See target/target.h. */
3830
3831 void
3832 target_stop_and_wait (ptid_t ptid)
3833 {
3834 struct target_waitstatus status;
3835 bool was_non_stop = non_stop;
3836
3837 non_stop = true;
3838 target_stop (ptid);
3839
3840 memset (&status, 0, sizeof (status));
3841 target_wait (ptid, &status, 0);
3842
3843 non_stop = was_non_stop;
3844 }
3845
3846 /* See target/target.h. */
3847
3848 void
3849 target_continue_no_signal (ptid_t ptid)
3850 {
3851 target_resume (ptid, 0, GDB_SIGNAL_0);
3852 }
3853
3854 /* See target/target.h. */
3855
3856 void
3857 target_continue (ptid_t ptid, enum gdb_signal signal)
3858 {
3859 target_resume (ptid, 0, signal);
3860 }
3861
3862 /* Concatenate ELEM to LIST, a comma-separated list. */
3863
3864 static void
3865 str_comma_list_concat_elem (std::string *list, const char *elem)
3866 {
3867 if (!list->empty ())
3868 list->append (", ");
3869
3870 list->append (elem);
3871 }
3872
/* Helper for target_options_to_string.  If OPT is present in
TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
OPT is removed from TARGET_OPTIONS.  */
3876
3877 static void
3878 do_option (target_wait_flags *target_options, std::string *ret,
3879 target_wait_flag opt, const char *opt_str)
3880 {
3881 if ((*target_options & opt) != 0)
3882 {
3883 str_comma_list_concat_elem (ret, opt_str);
3884 *target_options &= ~opt;
3885 }
3886 }
3887
3888 /* See target.h. */
3889
3890 std::string
3891 target_options_to_string (target_wait_flags target_options)
3892 {
3893 std::string ret;
3894
3895 #define DO_TARG_OPTION(OPT) \
3896 do_option (&target_options, &ret, OPT, #OPT)
3897
3898 DO_TARG_OPTION (TARGET_WNOHANG);
3899
3900 if (target_options != 0)
3901 str_comma_list_concat_elem (&ret, "unknown???");
3902
3903 return ret;
3904 }
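/* For example, target_options_to_string (TARGET_WNOHANG) yields
   "TARGET_WNOHANG", while any set bit without a DO_TARG_OPTION entry
   above shows up as "unknown???".  The result is meant for debug
   output, e.g. the target/infrun logging of target_wait calls.  */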
3905
3906 void
3907 target_fetch_registers (struct regcache *regcache, int regno)
3908 {
3909 current_inferior ()->top_target ()->fetch_registers (regcache, regno);
3910 if (targetdebug)
3911 regcache->debug_print_register ("target_fetch_registers", regno);
3912 }
3913
3914 void
3915 target_store_registers (struct regcache *regcache, int regno)
3916 {
3917 if (!may_write_registers)
3918 error (_("Writing to registers is not allowed (regno %d)"), regno);
3919
3920 current_inferior ()->top_target ()->store_registers (regcache, regno);
3921 if (targetdebug)
3922 {
3923 regcache->debug_print_register ("target_store_registers", regno);
3924 }
3925 }
3926
3927 int
3928 target_core_of_thread (ptid_t ptid)
3929 {
3930 return current_inferior ()->top_target ()->core_of_thread (ptid);
3931 }
3932
3933 int
3934 simple_verify_memory (struct target_ops *ops,
3935 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3936 {
3937 LONGEST total_xfered = 0;
3938
3939 while (total_xfered < size)
3940 {
3941 ULONGEST xfered_len;
3942 enum target_xfer_status status;
3943 gdb_byte buf[1024];
3944 ULONGEST howmuch = std::min<ULONGEST> (sizeof (buf), size - total_xfered);
3945
3946 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3947 buf, NULL, lma + total_xfered, howmuch,
3948 &xfered_len);
3949 if (status == TARGET_XFER_OK
3950 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3951 {
3952 total_xfered += xfered_len;
3953 QUIT;
3954 }
3955 else
3956 return 0;
3957 }
3958 return 1;
3959 }
3960
3961 /* Default implementation of memory verification. */
3962
3963 static int
3964 default_verify_memory (struct target_ops *self,
3965 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3966 {
3967 /* Start over from the top of the target stack. */
3968 return simple_verify_memory (current_inferior ()->top_target (),
3969 data, memaddr, size);
3970 }
3971
3972 int
3973 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3974 {
3975 target_ops *target = current_inferior ()->top_target ();
3976
3977 return target->verify_memory (data, memaddr, size);
3978 }
3979
3980 /* The documentation for this function is in its prototype declaration in
3981 target.h. */
3982
3983 int
3984 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3985 enum target_hw_bp_type rw)
3986 {
3987 target_ops *target = current_inferior ()->top_target ();
3988
3989 return target->insert_mask_watchpoint (addr, mask, rw);
3990 }
3991
3992 /* The documentation for this function is in its prototype declaration in
3993 target.h. */
3994
3995 int
3996 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask,
3997 enum target_hw_bp_type rw)
3998 {
3999 target_ops *target = current_inferior ()->top_target ();
4000
4001 return target->remove_mask_watchpoint (addr, mask, rw);
4002 }
4003
4004 /* The documentation for this function is in its prototype declaration
4005 in target.h. */
4006
4007 int
4008 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4009 {
4010 target_ops *target = current_inferior ()->top_target ();
4011
4012 return target->masked_watch_num_registers (addr, mask);
4013 }
4014
4015 /* The documentation for this function is in its prototype declaration
4016 in target.h. */
4017
4018 int
4019 target_ranged_break_num_registers (void)
4020 {
4021 return current_inferior ()->top_target ()->ranged_break_num_registers ();
4022 }
4023
4024 /* See target.h. */
4025
4026 struct btrace_target_info *
4027 target_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
4028 {
4029 return current_inferior ()->top_target ()->enable_btrace (ptid, conf);
4030 }
4031
4032 /* See target.h. */
4033
4034 void
4035 target_disable_btrace (struct btrace_target_info *btinfo)
4036 {
4037 current_inferior ()->top_target ()->disable_btrace (btinfo);
4038 }
4039
4040 /* See target.h. */
4041
4042 void
4043 target_teardown_btrace (struct btrace_target_info *btinfo)
4044 {
4045 current_inferior ()->top_target ()->teardown_btrace (btinfo);
4046 }
4047
4048 /* See target.h. */
4049
4050 enum btrace_error
4051 target_read_btrace (struct btrace_data *btrace,
4052 struct btrace_target_info *btinfo,
4053 enum btrace_read_type type)
4054 {
4055 target_ops *target = current_inferior ()->top_target ();
4056
4057 return target->read_btrace (btrace, btinfo, type);
4058 }
4059
4060 /* See target.h. */
4061
4062 const struct btrace_config *
4063 target_btrace_conf (const struct btrace_target_info *btinfo)
4064 {
4065 return current_inferior ()->top_target ()->btrace_conf (btinfo);
4066 }
4067
4068 /* See target.h. */
4069
4070 void
4071 target_stop_recording (void)
4072 {
4073 current_inferior ()->top_target ()->stop_recording ();
4074 }
4075
4076 /* See target.h. */
4077
4078 void
4079 target_save_record (const char *filename)
4080 {
4081 current_inferior ()->top_target ()->save_record (filename);
4082 }
4083
4084 /* See target.h. */
4085
4086 int
4087 target_supports_delete_record ()
4088 {
4089 return current_inferior ()->top_target ()->supports_delete_record ();
4090 }
4091
4092 /* See target.h. */
4093
4094 void
4095 target_delete_record (void)
4096 {
4097 current_inferior ()->top_target ()->delete_record ();
4098 }
4099
4100 /* See target.h. */
4101
4102 enum record_method
4103 target_record_method (ptid_t ptid)
4104 {
4105 return current_inferior ()->top_target ()->record_method (ptid);
4106 }
4107
4108 /* See target.h. */
4109
4110 int
4111 target_record_is_replaying (ptid_t ptid)
4112 {
4113 return current_inferior ()->top_target ()->record_is_replaying (ptid);
4114 }
4115
4116 /* See target.h. */
4117
4118 int
4119 target_record_will_replay (ptid_t ptid, int dir)
4120 {
4121 return current_inferior ()->top_target ()->record_will_replay (ptid, dir);
4122 }
4123
4124 /* See target.h. */
4125
4126 void
4127 target_record_stop_replaying (void)
4128 {
4129 current_inferior ()->top_target ()->record_stop_replaying ();
4130 }
4131
4132 /* See target.h. */
4133
4134 void
4135 target_goto_record_begin (void)
4136 {
4137 current_inferior ()->top_target ()->goto_record_begin ();
4138 }
4139
4140 /* See target.h. */
4141
4142 void
4143 target_goto_record_end (void)
4144 {
4145 current_inferior ()->top_target ()->goto_record_end ();
4146 }
4147
4148 /* See target.h. */
4149
4150 void
4151 target_goto_record (ULONGEST insn)
4152 {
4153 current_inferior ()->top_target ()->goto_record (insn);
4154 }
4155
4156 /* See target.h. */
4157
4158 void
4159 target_insn_history (int size, gdb_disassembly_flags flags)
4160 {
4161 current_inferior ()->top_target ()->insn_history (size, flags);
4162 }
4163
4164 /* See target.h. */
4165
4166 void
4167 target_insn_history_from (ULONGEST from, int size,
4168 gdb_disassembly_flags flags)
4169 {
4170 current_inferior ()->top_target ()->insn_history_from (from, size, flags);
4171 }
4172
4173 /* See target.h. */
4174
4175 void
4176 target_insn_history_range (ULONGEST begin, ULONGEST end,
4177 gdb_disassembly_flags flags)
4178 {
4179 current_inferior ()->top_target ()->insn_history_range (begin, end, flags);
4180 }
4181
4182 /* See target.h. */
4183
4184 void
4185 target_call_history (int size, record_print_flags flags)
4186 {
4187 current_inferior ()->top_target ()->call_history (size, flags);
4188 }
4189
4190 /* See target.h. */
4191
4192 void
4193 target_call_history_from (ULONGEST begin, int size, record_print_flags flags)
4194 {
4195 current_inferior ()->top_target ()->call_history_from (begin, size, flags);
4196 }
4197
4198 /* See target.h. */
4199
4200 void
4201 target_call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
4202 {
4203 current_inferior ()->top_target ()->call_history_range (begin, end, flags);
4204 }
4205
4206 /* See target.h. */
4207
4208 const struct frame_unwind *
4209 target_get_unwinder (void)
4210 {
4211 return current_inferior ()->top_target ()->get_unwinder ();
4212 }
4213
4214 /* See target.h. */
4215
4216 const struct frame_unwind *
4217 target_get_tailcall_unwinder (void)
4218 {
4219 return current_inferior ()->top_target ()->get_tailcall_unwinder ();
4220 }
4221
4222 /* See target.h. */
4223
4224 void
4225 target_prepare_to_generate_core (void)
4226 {
4227 current_inferior ()->top_target ()->prepare_to_generate_core ();
4228 }
4229
4230 /* See target.h. */
4231
4232 void
4233 target_done_generating_core (void)
4234 {
4235 current_inferior ()->top_target ()->done_generating_core ();
4236 }
4237
4238 \f
4239
4240 static char targ_desc[] =
4241 "Names of targets and files being debugged.\nShows the entire \
4242 stack of targets currently in use (including the exec-file,\n\
4243 core-file, and process, if any), as well as the symbol file name.";
4244
4245 static void
4246 default_rcmd (struct target_ops *self, const char *command,
4247 struct ui_file *output)
4248 {
4249 error (_("\"monitor\" command not supported by this target."));
4250 }
4251
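/* Implementation of the "monitor" CLI command: forward the argument
   string to the current target's rcmd method, with output sent to
   gdb_stdtarg.  A usage sketch ("reset" is a hypothetical request; the
   commands understood are entirely stub/monitor-defined):

     (gdb) monitor reset

   Targets that do not implement rcmd reject this via default_rcmd
   above.  */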
4252 static void
4253 do_monitor_command (const char *cmd, int from_tty)
4254 {
4255 target_rcmd (cmd, gdb_stdtarg);
4256 }
4257
4258 /* Erases all the memory regions marked as flash. CMD and FROM_TTY are
4259 ignored. */
4260
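/* Illustrative CLI usage (a sketch; the regions and addresses come from
   the target's memory map, the values below are made up):

     (gdb) flash-erase
     Erasing flash memory region at address 0x8000000, size = 0x20000  */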
4261 void
4262 flash_erase_command (const char *cmd, int from_tty)
4263 {
4264   /* Tracks whether any flash region was erased, so we know whether to tell the target that flash operations have finished.  */
4265 bool found_flash_region = false;
4266 struct gdbarch *gdbarch = target_gdbarch ();
4267
4268 std::vector<mem_region> mem_regions = target_memory_map ();
4269
4270 /* Iterate over all memory regions. */
4271 for (const mem_region &m : mem_regions)
4272 {
4273 /* Is this a flash memory region? */
4274 if (m.attrib.mode == MEM_FLASH)
4275 {
4276 found_flash_region = true;
4277 target_flash_erase (m.lo, m.hi - m.lo);
4278
4279 ui_out_emit_tuple tuple_emitter (current_uiout, "erased-regions");
4280
4281 current_uiout->message (_("Erasing flash memory region at address "));
4282 current_uiout->field_core_addr ("address", gdbarch, m.lo);
4283 current_uiout->message (", size = ");
4284 current_uiout->field_string ("size", hex_string (m.hi - m.lo));
4285 current_uiout->message ("\n");
4286 }
4287 }
4288
4289 /* Did we do any flash operations? If so, we need to finalize them. */
4290 if (found_flash_region)
4291 target_flash_done ();
4292 else
4293 current_uiout->message (_("No flash memory regions found.\n"));
4294 }
4295
4296 /* Print the name of each layer of our target stack. */
4297
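/* Example session (a sketch; the entries and long names depend on which
   targets are currently pushed on the stack):

     (gdb) maintenance print target-stack
     The current target stack is:
       - remote (Remote serial target in gdb-specific protocol)
       - exec (Local exec file)
       - None (None)  */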
4298 static void
4299 maintenance_print_target_stack (const char *cmd, int from_tty)
4300 {
4301 printf_filtered (_("The current target stack is:\n"));
4302
4303 for (target_ops *t = current_inferior ()->top_target ();
4304 t != NULL;
4305 t = t->beneath ())
4306 {
4307 if (t->stratum () == debug_stratum)
4308 continue;
4309 printf_filtered (" - %s (%s)\n", t->shortname (), t->longname ());
4310 }
4311 }
4312
4313 /* See target.h. */
4314
4315 void
4316 target_async (int enable)
4317 {
4318 infrun_async (enable);
4319 current_inferior ()->top_target ()->async (enable);
4320 }
4321
4322 /* See target.h. */
4323
4324 void
4325 target_thread_events (int enable)
4326 {
4327 current_inferior ()->top_target ()->thread_events (enable);
4328 }
4329
4330 /* Controls whether targets may report that they can run, or are running,
4331    asynchronously.  This is just for maintainers to use when debugging gdb. */
4332 bool target_async_permitted = true;
4333
4334 /* The set command writes to this variable. If the inferior is
4335 executing, target_async_permitted is *not* updated. */
4336 static bool target_async_permitted_1 = true;
4337
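/* The "maint set target-async" hook.  The "set" machinery has already
   staged the new value in TARGET_ASYNC_PERMITTED_1; if an inferior is
   live, undo that and error out, otherwise commit it to
   TARGET_ASYNC_PERMITTED.  For example, "maint set target-async off"
   only takes effect while no inferior is running.  */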
4338 static void
4339 maint_set_target_async_command (const char *args, int from_tty,
4340 struct cmd_list_element *c)
4341 {
4342 if (have_live_inferiors ())
4343 {
4344 target_async_permitted_1 = target_async_permitted;
4345 error (_("Cannot change this setting while the inferior is running."));
4346 }
4347
4348 target_async_permitted = target_async_permitted_1;
4349 }
4350
4351 static void
4352 maint_show_target_async_command (struct ui_file *file, int from_tty,
4353 struct cmd_list_element *c,
4354 const char *value)
4355 {
4356 fprintf_filtered (file,
4357 _("Controlling the inferior in "
4358 "asynchronous mode is %s.\n"), value);
4359 }
4360
4361 /* Return true if the target operates in non-stop mode even with "set
4362 non-stop off". */
4363
4364 static int
4365 target_always_non_stop_p (void)
4366 {
4367 return current_inferior ()->top_target ()->always_non_stop_p ();
4368 }
4369
4370 /* See target.h. */
4371
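/* Summarizing the check below: non-stop is reported only when async
   execution is possible at all, and either the user enabled non-stop
   explicitly ("set non-stop on" or "maint set target-non-stop on"), or
   the setting is "auto" and the target always runs in non-stop mode.  */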
4372 bool
4373 target_is_non_stop_p ()
4374 {
4375 return ((non_stop
4376 || target_non_stop_enabled == AUTO_BOOLEAN_TRUE
4377 || (target_non_stop_enabled == AUTO_BOOLEAN_AUTO
4378 && target_always_non_stop_p ()))
4379 && target_can_async_p ());
4380 }
4381
4382 /* See target.h. */
4383
4384 bool
4385 exists_non_stop_target ()
4386 {
4387 if (target_is_non_stop_p ())
4388 return true;
4389
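  /* switch_to_inferior_no_thread below changes the globally selected
     inferior; this scoped restore puts the original thread/inferior
     selection back when the function returns.  */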
4390 scoped_restore_current_thread restore_thread;
4391
4392 for (inferior *inf : all_inferiors ())
4393 {
4394 switch_to_inferior_no_thread (inf);
4395 if (target_is_non_stop_p ())
4396 return true;
4397 }
4398
4399 return false;
4400 }
4401
4402 /* Controls whether targets can report that they always run in non-stop
4403    mode.  This is just for maintainers to use when debugging gdb. */
4404 enum auto_boolean target_non_stop_enabled = AUTO_BOOLEAN_AUTO;
4405
4406 /* The set command writes to this variable. If the inferior is
4407 executing, target_non_stop_enabled is *not* updated. */
4408 static enum auto_boolean target_non_stop_enabled_1 = AUTO_BOOLEAN_AUTO;
4409
4410 /* Implementation of "maint set target-non-stop". */
4411
4412 static void
4413 maint_set_target_non_stop_command (const char *args, int from_tty,
4414 struct cmd_list_element *c)
4415 {
4416 if (have_live_inferiors ())
4417 {
4418 target_non_stop_enabled_1 = target_non_stop_enabled;
4419 error (_("Cannot change this setting while the inferior is running."));
4420 }
4421
4422 target_non_stop_enabled = target_non_stop_enabled_1;
4423 }
4424
4425 /* Implementation of "maint show target-non-stop". */
4426
4427 static void
4428 maint_show_target_non_stop_command (struct ui_file *file, int from_tty,
4429 struct cmd_list_element *c,
4430 const char *value)
4431 {
4432 if (target_non_stop_enabled == AUTO_BOOLEAN_AUTO)
4433 fprintf_filtered (file,
4434 _("Whether the target is always in non-stop mode "
4435 "is %s (currently %s).\n"), value,
4436 target_always_non_stop_p () ? "on" : "off");
4437 else
4438 fprintf_filtered (file,
4439 _("Whether the target is always in non-stop mode "
4440 "is %s.\n"), value);
4441 }
4442
4443 /* Temporary copies of permission settings. */
4444
4445 static bool may_write_registers_1 = true;
4446 static bool may_write_memory_1 = true;
4447 static bool may_insert_breakpoints_1 = true;
4448 static bool may_insert_tracepoints_1 = true;
4449 static bool may_insert_fast_tracepoints_1 = true;
4450 static bool may_stop_1 = true;
4451
4452 /* Make the user-set values match the real values again. */
4453
4454 void
4455 update_target_permissions (void)
4456 {
4457 may_write_registers_1 = may_write_registers;
4458 may_write_memory_1 = may_write_memory;
4459 may_insert_breakpoints_1 = may_insert_breakpoints;
4460 may_insert_tracepoints_1 = may_insert_tracepoints;
4461 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4462 may_stop_1 = may_stop;
4463 }
4464
4465 /* This one function handles (most of) the permission flags in the same
4466    way. */
4467
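/* For example, "set may-insert-breakpoints off" stages the new value in
   MAY_INSERT_BREAKPOINTS_1; this hook then either commits it or, if the
   inferior is running, restores the previous value and errors out.
   Note that may-write-memory is not handled here; it has its own hook,
   set_write_memory_permission, so it can be changed independently of
   observer mode.  */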
4468 static void
4469 set_target_permissions (const char *args, int from_tty,
4470 struct cmd_list_element *c)
4471 {
4472 if (target_has_execution ())
4473 {
4474 update_target_permissions ();
4475 error (_("Cannot change this setting while the inferior is running."));
4476 }
4477
4478 /* Make the real values match the user-changed values. */
4479 may_write_registers = may_write_registers_1;
4480 may_insert_breakpoints = may_insert_breakpoints_1;
4481 may_insert_tracepoints = may_insert_tracepoints_1;
4482 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4483 may_stop = may_stop_1;
4484 update_observer_mode ();
4485 }
4486
4487 /* Set memory write permission independently of observer mode. */
4488
4489 static void
4490 set_write_memory_permission (const char *args, int from_tty,
4491 struct cmd_list_element *c)
4492 {
4493 /* Make the real values match the user-changed values. */
4494 may_write_memory = may_write_memory_1;
4495 update_observer_mode ();
4496 }
4497
4498 void _initialize_target ();
4499
4500 void
4501 _initialize_target ()
4502 {
4503 the_debug_target = new debug_target ();
4504
4505 add_info ("target", info_target_command, targ_desc);
4506 add_info ("files", info_target_command, targ_desc);
4507
4508 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4509 Set target debugging."), _("\
4510 Show target debugging."), _("\
4511 When non-zero, target debugging is enabled. Higher numbers are more\n\
4512 verbose."),
4513 set_targetdebug,
4514 show_targetdebug,
4515 &setdebuglist, &showdebuglist);
4516
4517 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4518 &trust_readonly, _("\
4519 Set mode for reading from readonly sections."), _("\
4520 Show mode for reading from readonly sections."), _("\
4521 When this mode is on, memory in readonly sections (such as .text)\n\
4522 will be read from the object file instead of from the target. This will\n\
4523 result in a significant performance improvement for remote targets."),
4524 NULL,
4525 show_trust_readonly,
4526 &setlist, &showlist);
4527
4528 add_com ("monitor", class_obscure, do_monitor_command,
4529 _("Send a command to the remote monitor (remote targets only)."));
4530
4531 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4532 _("Print the name of each layer of the internal target stack."),
4533 &maintenanceprintlist);
4534
4535 add_setshow_boolean_cmd ("target-async", no_class,
4536 &target_async_permitted_1, _("\
4537 Set whether gdb controls the inferior in asynchronous mode."), _("\
4538 Show whether gdb controls the inferior in asynchronous mode."), _("\
4539 Tells gdb whether to control the inferior in asynchronous mode."),
4540 maint_set_target_async_command,
4541 maint_show_target_async_command,
4542 &maintenance_set_cmdlist,
4543 &maintenance_show_cmdlist);
4544
4545 add_setshow_auto_boolean_cmd ("target-non-stop", no_class,
4546 &target_non_stop_enabled_1, _("\
4547 Set whether gdb always controls the inferior in non-stop mode."), _("\
4548 Show whether gdb always controls the inferior in non-stop mode."), _("\
4549 Tells gdb whether to control the inferior in non-stop mode."),
4550 maint_set_target_non_stop_command,
4551 maint_show_target_non_stop_command,
4552 &maintenance_set_cmdlist,
4553 &maintenance_show_cmdlist);
4554
4555 add_setshow_boolean_cmd ("may-write-registers", class_support,
4556 &may_write_registers_1, _("\
4557 Set permission to write into registers."), _("\
4558 Show permission to write into registers."), _("\
4559 When this permission is on, GDB may write into the target's registers.\n\
4560 Otherwise, any sort of write attempt will result in an error."),
4561 set_target_permissions, NULL,
4562 &setlist, &showlist);
4563
4564 add_setshow_boolean_cmd ("may-write-memory", class_support,
4565 &may_write_memory_1, _("\
4566 Set permission to write into target memory."), _("\
4567 Show permission to write into target memory."), _("\
4568 When this permission is on, GDB may write into the target's memory.\n\
4569 Otherwise, any sort of write attempt will result in an error."),
4570 set_write_memory_permission, NULL,
4571 &setlist, &showlist);
4572
4573 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4574 &may_insert_breakpoints_1, _("\
4575 Set permission to insert breakpoints in the target."), _("\
4576 Show permission to insert breakpoints in the target."), _("\
4577 When this permission is on, GDB may insert breakpoints in the program.\n\
4578 Otherwise, any sort of insertion attempt will result in an error."),
4579 set_target_permissions, NULL,
4580 &setlist, &showlist);
4581
4582 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4583 &may_insert_tracepoints_1, _("\
4584 Set permission to insert tracepoints in the target."), _("\
4585 Show permission to insert tracepoints in the target."), _("\
4586 When this permission is on, GDB may insert tracepoints in the program.\n\
4587 Otherwise, any sort of insertion attempt will result in an error."),
4588 set_target_permissions, NULL,
4589 &setlist, &showlist);
4590
4591 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4592 &may_insert_fast_tracepoints_1, _("\
4593 Set permission to insert fast tracepoints in the target."), _("\
4594 Show permission to insert fast tracepoints in the target."), _("\
4595 When this permission is on, GDB may insert fast tracepoints.\n\
4596 Otherwise, any sort of insertion attempt will result in an error."),
4597 set_target_permissions, NULL,
4598 &setlist, &showlist);
4599
4600 add_setshow_boolean_cmd ("may-interrupt", class_support,
4601 &may_stop_1, _("\
4602 Set permission to interrupt or signal the target."), _("\
4603 Show permission to interrupt or signal the target."), _("\
4604 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4605 Otherwise, any attempt to interrupt or stop will be ignored."),
4606 set_target_permissions, NULL,
4607 &setlist, &showlist);
4608
4609 add_com ("flash-erase", no_class, flash_erase_command,
4610 _("Erase all flash memory regions."));
4611
4612 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
4613 &auto_connect_native_target, _("\
4614 Set whether GDB may automatically connect to the native target."), _("\
4615 Show whether GDB may automatically connect to the native target."), _("\
4616 When on, and GDB is not connected to a target yet, GDB\n\
4617 attempts \"run\" and other commands with the native target."),
4618 NULL, show_auto_connect_native_target,
4619 &setlist, &showlist);
4620 }