/* gdb/target.c (from the binutils-gdb repository).  */
1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
4 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 Contributed by Cygnus Support.
8
9 This file is part of GDB.
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23
24 #include "defs.h"
25 #include <errno.h>
26 #include "gdb_string.h"
27 #include "target.h"
28 #include "gdbcmd.h"
29 #include "symtab.h"
30 #include "inferior.h"
31 #include "bfd.h"
32 #include "symfile.h"
33 #include "objfiles.h"
34 #include "gdb_wait.h"
35 #include "dcache.h"
36 #include <signal.h>
37 #include "regcache.h"
38 #include "gdb_assert.h"
39 #include "gdbcore.h"
40 #include "exceptions.h"
41 #include "target-descriptions.h"
42 #include "gdbthread.h"
43 #include "solib.h"
44 #include "exec.h"
45 #include "inline-frame.h"
46 #include "tracepoint.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
56
57 static void tcomplain (void) ATTRIBUTE_NORETURN;
58
59 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
60
61 static int return_zero (void);
62
63 static int return_one (void);
64
65 static int return_minus_one (void);
66
67 void target_ignore (void);
68
69 static void target_command (char *, int);
70
71 static struct target_ops *find_default_run_target (char *);
72
73 static LONGEST default_xfer_partial (struct target_ops *ops,
74 enum target_object object,
75 const char *annex, gdb_byte *readbuf,
76 const gdb_byte *writebuf,
77 ULONGEST offset, LONGEST len);
78
79 static LONGEST current_xfer_partial (struct target_ops *ops,
80 enum target_object object,
81 const char *annex, gdb_byte *readbuf,
82 const gdb_byte *writebuf,
83 ULONGEST offset, LONGEST len);
84
85 static LONGEST target_xfer_partial (struct target_ops *ops,
86 enum target_object object,
87 const char *annex,
88 void *readbuf, const void *writebuf,
89 ULONGEST offset, LONGEST len);
90
91 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
92 ptid_t ptid);
93
94 static void init_dummy_target (void);
95
96 static struct target_ops debug_target;
97
98 static void debug_to_open (char *, int);
99
100 static void debug_to_prepare_to_store (struct regcache *);
101
102 static void debug_to_files_info (struct target_ops *);
103
104 static int debug_to_insert_breakpoint (struct gdbarch *,
105 struct bp_target_info *);
106
107 static int debug_to_remove_breakpoint (struct gdbarch *,
108 struct bp_target_info *);
109
110 static int debug_to_can_use_hw_breakpoint (int, int, int);
111
112 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
116 struct bp_target_info *);
117
118 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
119 struct expression *);
120
121 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
122 struct expression *);
123
124 static int debug_to_stopped_by_watchpoint (void);
125
126 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
127
128 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
129 CORE_ADDR, CORE_ADDR, int);
130
131 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
132
133 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
134 struct expression *);
135
136 static void debug_to_terminal_init (void);
137
138 static void debug_to_terminal_inferior (void);
139
140 static void debug_to_terminal_ours_for_output (void);
141
142 static void debug_to_terminal_save_ours (void);
143
144 static void debug_to_terminal_ours (void);
145
146 static void debug_to_terminal_info (char *, int);
147
148 static void debug_to_load (char *, int);
149
150 static int debug_to_can_run (void);
151
152 static void debug_to_stop (ptid_t);
153
/* Pointer to array of target architecture structures; the size of the
   array; the current index into the array; the allocated size of the
   array.  */
struct target_ops **target_structs;
unsigned target_struct_size;
unsigned target_struct_index;
unsigned target_struct_allocsize;
/* Initial number of slots in TARGET_STRUCTS; add_target doubles the
   allocation on demand.  */
#define DEFAULT_ALLOCSIZE 10

/* The initial current target, so that there is always a semi-valid
   current target.  */

static struct target_ops dummy_target;

/* Top of target stack.  */

static struct target_ops *target_stack;

/* The target structure we are currently using to talk to a process
   or file or whatever "inferior" we have.  */

struct target_ops current_target;

/* Command list for target.  */

static struct cmd_list_element *targetlist = NULL;

/* Nonzero if we should trust readonly sections from the
   executable when reading memory.  */

static int trust_readonly = 0;

/* Nonzero if we should show true memory content including
   memory breakpoint inserted by gdb.  */

static int show_memory_breakpoints = 0;

/* These globals control whether GDB attempts to perform these
   operations; they are useful for targets that need to prevent
   inadvertant disruption, such as in non-stop mode.  */

int may_write_registers = 1;

int may_write_memory = 1;

int may_insert_breakpoints = 1;

int may_insert_tracepoints = 1;

int may_insert_fast_tracepoints = 1;

int may_stop = 1;

/* Non-zero if we want to see trace of target level stuff.  */

static int targetdebug = 0;
/* Implementation of the "show debug target" command; VALUE is the
   current setting rendered as a string.  */
static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
216
static void setup_target_debug (void);

/* The option sets this.  */
static int stack_cache_enabled_p_1 = 1;
/* And set_stack_cache_enabled_p updates this.
   The reason for the separation is so that we don't flush the cache for
   on->on transitions.  */
static int stack_cache_enabled_p = 1;
225
226 /* This is called *after* the stack-cache has been set.
227 Flush the cache for off->on and on->off transitions.
228 There's no real need to flush the cache for on->off transitions,
229 except cleanliness. */
230
231 static void
232 set_stack_cache_enabled_p (char *args, int from_tty,
233 struct cmd_list_element *c)
234 {
235 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
236 target_dcache_invalidate ();
237
238 stack_cache_enabled_p = stack_cache_enabled_p_1;
239 }
240
/* Implementation of the "show stack-cache" command; VALUE is the
   current setting rendered as a string.  */

static void
show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
}
247
/* Cache of memory operations, to speed up remote access.  */
static DCACHE *target_dcache;

/* Invalidate the target dcache.  Called whenever cached memory
   contents may have gone stale (e.g. after a load).  */

void
target_dcache_invalidate (void)
{
  dcache_invalidate (target_dcache);
}
258
/* The user just typed 'target' without the name of a target.
   Print a usage hint rather than erroring out.  */

static void
target_command (char *arg, int from_tty)
{
  fputs_filtered ("Argument required (target name). Try `help target'\n",
		  gdb_stdout);
}
267
268 /* Default target_has_* methods for process_stratum targets. */
269
270 int
271 default_child_has_all_memory (struct target_ops *ops)
272 {
273 /* If no inferior selected, then we can't read memory here. */
274 if (ptid_equal (inferior_ptid, null_ptid))
275 return 0;
276
277 return 1;
278 }
279
280 int
281 default_child_has_memory (struct target_ops *ops)
282 {
283 /* If no inferior selected, then we can't read memory here. */
284 if (ptid_equal (inferior_ptid, null_ptid))
285 return 0;
286
287 return 1;
288 }
289
290 int
291 default_child_has_stack (struct target_ops *ops)
292 {
293 /* If no inferior selected, there's no stack. */
294 if (ptid_equal (inferior_ptid, null_ptid))
295 return 0;
296
297 return 1;
298 }
299
300 int
301 default_child_has_registers (struct target_ops *ops)
302 {
303 /* Can't read registers from no inferior. */
304 if (ptid_equal (inferior_ptid, null_ptid))
305 return 0;
306
307 return 1;
308 }
309
310 int
311 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
312 {
313 /* If there's no thread selected, then we can't make it run through
314 hoops. */
315 if (ptid_equal (the_ptid, null_ptid))
316 return 0;
317
318 return 1;
319 }
320
321
322 int
323 target_has_all_memory_1 (void)
324 {
325 struct target_ops *t;
326
327 for (t = current_target.beneath; t != NULL; t = t->beneath)
328 if (t->to_has_all_memory (t))
329 return 1;
330
331 return 0;
332 }
333
334 int
335 target_has_memory_1 (void)
336 {
337 struct target_ops *t;
338
339 for (t = current_target.beneath; t != NULL; t = t->beneath)
340 if (t->to_has_memory (t))
341 return 1;
342
343 return 0;
344 }
345
346 int
347 target_has_stack_1 (void)
348 {
349 struct target_ops *t;
350
351 for (t = current_target.beneath; t != NULL; t = t->beneath)
352 if (t->to_has_stack (t))
353 return 1;
354
355 return 0;
356 }
357
358 int
359 target_has_registers_1 (void)
360 {
361 struct target_ops *t;
362
363 for (t = current_target.beneath; t != NULL; t = t->beneath)
364 if (t->to_has_registers (t))
365 return 1;
366
367 return 0;
368 }
369
370 int
371 target_has_execution_1 (ptid_t the_ptid)
372 {
373 struct target_ops *t;
374
375 for (t = current_target.beneath; t != NULL; t = t->beneath)
376 if (t->to_has_execution (t, the_ptid))
377 return 1;
378
379 return 0;
380 }
381
/* Like target_has_execution_1, but for the currently selected
   inferior's ptid.  */

int
target_has_execution_current (void)
{
  return target_has_execution_1 (inferior_ptid);
}
387
/* Add a possible target architecture to the list.  Fills in default
   implementations for the "must have" methods, records T in the
   TARGET_STRUCTS registry (growing it on demand), and registers a
   "target <shortname>" subcommand for it.  */

void
add_target (struct target_ops *t)
{
  /* Provide default values for all "must have" methods.  */
  if (t->to_xfer_partial == NULL)
    t->to_xfer_partial = default_xfer_partial;

  if (t->to_has_all_memory == NULL)
    t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_memory == NULL)
    t->to_has_memory = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_stack == NULL)
    t->to_has_stack = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_registers == NULL)
    t->to_has_registers = (int (*) (struct target_ops *)) return_zero;

  if (t->to_has_execution == NULL)
    t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;

  /* Grow the registry on demand, doubling its size each time it
     fills up.  */
  if (!target_structs)
    {
      target_struct_allocsize = DEFAULT_ALLOCSIZE;
      target_structs = (struct target_ops **) xmalloc
	(target_struct_allocsize * sizeof (*target_structs));
    }
  if (target_struct_size >= target_struct_allocsize)
    {
      target_struct_allocsize *= 2;
      target_structs = (struct target_ops **)
	xrealloc ((char *) target_structs,
		  target_struct_allocsize * sizeof (*target_structs));
    }
  target_structs[target_struct_size++] = t;

  /* The first registered target also creates the "target" command
     prefix itself.  */
  if (targetlist == NULL)
    add_prefix_cmd ("target", class_run, target_command, _("\
Connect to a target machine or process.\n\
The first argument is the type or protocol of the target machine.\n\
Remaining arguments are interpreted by the target protocol. For more\n\
information on the arguments for a particular protocol, type\n\
`help target ' followed by the protocol name."),
		    &targetlist, "target ", 0, &cmdlist);
  add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
}
437
/* Stub functions */

void
target_ignore (void)
{
  /* Deliberately empty: used (via casts) as the default for target
     methods that require no action.  */
}
444
445 void
446 target_kill (void)
447 {
448 struct target_ops *t;
449
450 for (t = current_target.beneath; t != NULL; t = t->beneath)
451 if (t->to_kill != NULL)
452 {
453 if (targetdebug)
454 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
455
456 t->to_kill (t);
457 return;
458 }
459
460 noprocess ();
461 }
462
/* Load a program into the target, first discarding any cached memory
   contents that the load would make stale.  */

void
target_load (char *arg, int from_tty)
{
  target_dcache_invalidate ();
  (*current_target.to_load) (arg, from_tty);
}
469
470 void
471 target_create_inferior (char *exec_file, char *args,
472 char **env, int from_tty)
473 {
474 struct target_ops *t;
475
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
477 {
478 if (t->to_create_inferior != NULL)
479 {
480 t->to_create_inferior (t, exec_file, args, env, from_tty);
481 if (targetdebug)
482 fprintf_unfiltered (gdb_stdlog,
483 "target_create_inferior (%s, %s, xxx, %d)\n",
484 exec_file, args, from_tty);
485 return;
486 }
487 }
488
489 internal_error (__FILE__, __LINE__,
490 _("could not find a target to create inferior"));
491 }
492
/* Hand the terminal over to the inferior, unless GDB is resuming it
   in the background (in which case GDB keeps the terminal).  */

void
target_terminal_inferior (void)
{
  /* A background resume (``run&'') should leave GDB in control of the
     terminal.  Use target_can_async_p, not target_is_async_p, since at
     this point the target is not async yet.  However, if sync_execution
     is not set, we know it will become async prior to resume.  */
  if (target_can_async_p () && !sync_execution)
    return;

  /* If GDB is resuming the inferior in the foreground, install
     inferior's terminal modes.  */
  (*current_target.to_terminal_inferior) ();
}
507
/* Default memory-transfer method that always fails: flags the
   location as inaccessible via errno and reports nothing moved.  */

static int
nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
	  struct target_ops *t)
{
  errno = EIO;		/* Can't read/write this location.  */
  return 0;		/* No bytes handled.  */
}
515
/* Error out, reporting that the current target does not support the
   attempted operation.  Does not return.  */

static void
tcomplain (void)
{
  error (_("You can't do that when your target is `%s'"),
	 current_target.to_shortname);
}
522
/* Error out, reporting that the operation requires a process to
   debug.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
528
/* Default to_terminal_info method: there is no saved terminal state
   to report.  */

static void
default_terminal_info (char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
534
/* A default implementation for the to_get_ada_task_ptid target method.

   This function builds the PTID by using both LWP and TID as part of
   the PTID lwp and tid elements.  The pid used is the pid of the
   inferior_ptid.  */

static ptid_t
default_get_ada_task_ptid (long lwp, long tid)
{
  return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
}
546
/* Go through the target stack from top to bottom, copying over zero
   entries in current_target, then filling in still empty entries.  In
   effect, we are doing class inheritance through the pushed target
   vectors.

   NOTE: cagney/2003-10-17: The problem with this inheritance, as it
   is currently implemented, is that it discards any knowledge of
   which target an inherited method originally belonged to.
   Consequently, new target methods should instead explicitly and
   locally search the target stack for the target that can handle the
   request.  */

static void
update_current_target (void)
{
  struct target_ops *t;

  /* First, reset current's contents.  */
  memset (&current_target, 0, sizeof (current_target));

  /* Copy FIELD from TARGET into current_target unless a target higher
     on the stack already supplied it -- the topmost provider wins.  */
#define INHERIT(FIELD, TARGET) \
  if (!current_target.FIELD) \
    current_target.FIELD = (TARGET)->FIELD

  for (t = target_stack; t; t = t->beneath)
    {
      INHERIT (to_shortname, t);
      INHERIT (to_longname, t);
      INHERIT (to_doc, t);
      /* Do not inherit to_open.  */
      /* Do not inherit to_close.  */
      /* Do not inherit to_attach.  */
      INHERIT (to_post_attach, t);
      INHERIT (to_attach_no_wait, t);
      /* Do not inherit to_detach.  */
      /* Do not inherit to_disconnect.  */
      /* Do not inherit to_resume.  */
      /* Do not inherit to_wait.  */
      /* Do not inherit to_fetch_registers.  */
      /* Do not inherit to_store_registers.  */
      INHERIT (to_prepare_to_store, t);
      INHERIT (deprecated_xfer_memory, t);
      INHERIT (to_files_info, t);
      INHERIT (to_insert_breakpoint, t);
      INHERIT (to_remove_breakpoint, t);
      INHERIT (to_can_use_hw_breakpoint, t);
      INHERIT (to_insert_hw_breakpoint, t);
      INHERIT (to_remove_hw_breakpoint, t);
      /* Do not inherit to_ranged_break_num_registers.  */
      INHERIT (to_insert_watchpoint, t);
      INHERIT (to_remove_watchpoint, t);
      INHERIT (to_stopped_data_address, t);
      INHERIT (to_have_steppable_watchpoint, t);
      INHERIT (to_have_continuable_watchpoint, t);
      INHERIT (to_stopped_by_watchpoint, t);
      INHERIT (to_watchpoint_addr_within_range, t);
      INHERIT (to_region_ok_for_hw_watchpoint, t);
      INHERIT (to_can_accel_watchpoint_condition, t);
      INHERIT (to_terminal_init, t);
      INHERIT (to_terminal_inferior, t);
      INHERIT (to_terminal_ours_for_output, t);
      INHERIT (to_terminal_ours, t);
      INHERIT (to_terminal_save_ours, t);
      INHERIT (to_terminal_info, t);
      /* Do not inherit to_kill.  */
      INHERIT (to_load, t);
      /* Do not inherit to_create_inferior.  */
      INHERIT (to_post_startup_inferior, t);
      INHERIT (to_insert_fork_catchpoint, t);
      INHERIT (to_remove_fork_catchpoint, t);
      INHERIT (to_insert_vfork_catchpoint, t);
      INHERIT (to_remove_vfork_catchpoint, t);
      /* Do not inherit to_follow_fork.  */
      INHERIT (to_insert_exec_catchpoint, t);
      INHERIT (to_remove_exec_catchpoint, t);
      INHERIT (to_set_syscall_catchpoint, t);
      INHERIT (to_has_exited, t);
      /* Do not inherit to_mourn_inferior.  */
      INHERIT (to_can_run, t);
      /* Do not inherit to_pass_signals.  */
      /* Do not inherit to_thread_alive.  */
      /* Do not inherit to_find_new_threads.  */
      /* Do not inherit to_pid_to_str.  */
      INHERIT (to_extra_thread_info, t);
      INHERIT (to_thread_name, t);
      INHERIT (to_stop, t);
      /* Do not inherit to_xfer_partial.  */
      INHERIT (to_rcmd, t);
      INHERIT (to_pid_to_exec_file, t);
      INHERIT (to_log_command, t);
      INHERIT (to_stratum, t);
      /* Do not inherit to_has_all_memory.  */
      /* Do not inherit to_has_memory.  */
      /* Do not inherit to_has_stack.  */
      /* Do not inherit to_has_registers.  */
      /* Do not inherit to_has_execution.  */
      INHERIT (to_has_thread_control, t);
      INHERIT (to_can_async_p, t);
      INHERIT (to_is_async_p, t);
      INHERIT (to_async, t);
      INHERIT (to_async_mask, t);
      INHERIT (to_find_memory_regions, t);
      INHERIT (to_make_corefile_notes, t);
      INHERIT (to_get_bookmark, t);
      INHERIT (to_goto_bookmark, t);
      /* Do not inherit to_get_thread_local_address.  */
      INHERIT (to_can_execute_reverse, t);
      INHERIT (to_thread_architecture, t);
      /* Do not inherit to_read_description.  */
      INHERIT (to_get_ada_task_ptid, t);
      /* Do not inherit to_search_memory.  */
      INHERIT (to_supports_multi_process, t);
      INHERIT (to_trace_init, t);
      INHERIT (to_download_tracepoint, t);
      INHERIT (to_download_trace_state_variable, t);
      INHERIT (to_trace_set_readonly_regions, t);
      INHERIT (to_trace_start, t);
      INHERIT (to_get_trace_status, t);
      INHERIT (to_trace_stop, t);
      INHERIT (to_trace_find, t);
      INHERIT (to_get_trace_state_variable_value, t);
      INHERIT (to_save_trace_data, t);
      INHERIT (to_upload_tracepoints, t);
      INHERIT (to_upload_trace_state_variables, t);
      INHERIT (to_get_raw_trace_data, t);
      INHERIT (to_set_disconnected_tracing, t);
      INHERIT (to_set_circular_trace_buffer, t);
      INHERIT (to_get_tib_address, t);
      INHERIT (to_set_permissions, t);
      INHERIT (to_static_tracepoint_marker_at, t);
      INHERIT (to_static_tracepoint_markers_by_strid, t);
      INHERIT (to_traceframe_info, t);
      INHERIT (to_magic, t);
      /* Do not inherit to_memory_map.  */
      /* Do not inherit to_flash_erase.  */
      /* Do not inherit to_flash_done.  */
    }
#undef INHERIT

  /* Clean up a target struct so it no longer has any zero pointers in
     it.  Some entries are defaulted to a method that print an error,
     others are hard-wired to a standard recursive default.  */

#define de_fault(field, value) \
  if (!current_target.field) \
    current_target.field = value

  de_fault (to_open,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_close,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_post_attach,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_prepare_to_store,
	    (void (*) (struct regcache *))
	    noprocess);
  de_fault (deprecated_xfer_memory,
	    (int (*) (CORE_ADDR, gdb_byte *, int, int,
		      struct mem_attrib *, struct target_ops *))
	    nomemory);
  de_fault (to_files_info,
	    (void (*) (struct target_ops *))
	    target_ignore);
  de_fault (to_insert_breakpoint,
	    memory_insert_breakpoint);
  de_fault (to_remove_breakpoint,
	    memory_remove_breakpoint);
  de_fault (to_can_use_hw_breakpoint,
	    (int (*) (int, int, int))
	    return_zero);
  de_fault (to_insert_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_remove_hw_breakpoint,
	    (int (*) (struct gdbarch *, struct bp_target_info *))
	    return_minus_one);
  de_fault (to_insert_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_remove_watchpoint,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_minus_one);
  de_fault (to_stopped_by_watchpoint,
	    (int (*) (void))
	    return_zero);
  de_fault (to_stopped_data_address,
	    (int (*) (struct target_ops *, CORE_ADDR *))
	    return_zero);
  de_fault (to_watchpoint_addr_within_range,
	    default_watchpoint_addr_within_range);
  de_fault (to_region_ok_for_hw_watchpoint,
	    default_region_ok_for_hw_watchpoint);
  de_fault (to_can_accel_watchpoint_condition,
	    (int (*) (CORE_ADDR, int, int, struct expression *))
	    return_zero);
  de_fault (to_terminal_init,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_inferior,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours_for_output,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_save_ours,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_terminal_info,
	    default_terminal_info);
  de_fault (to_load,
	    (void (*) (char *, int))
	    tcomplain);
  de_fault (to_post_startup_inferior,
	    (void (*) (ptid_t))
	    target_ignore);
  de_fault (to_insert_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_fork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_vfork_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_insert_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_remove_exec_catchpoint,
	    (int (*) (int))
	    return_one);
  de_fault (to_set_syscall_catchpoint,
	    (int (*) (int, int, int, int, int *))
	    return_one);
  de_fault (to_has_exited,
	    (int (*) (int, int, int *))
	    return_zero);
  de_fault (to_can_run,
	    return_zero);
  de_fault (to_extra_thread_info,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_thread_name,
	    (char *(*) (struct thread_info *))
	    return_zero);
  de_fault (to_stop,
	    (void (*) (ptid_t))
	    target_ignore);
  /* to_xfer_partial is special: it always dispatches down the stack
     itself, so it is set unconditionally rather than defaulted.  */
  current_target.to_xfer_partial = current_xfer_partial;
  de_fault (to_rcmd,
	    (void (*) (char *, struct ui_file *))
	    tcomplain);
  de_fault (to_pid_to_exec_file,
	    (char *(*) (int))
	    return_zero);
  de_fault (to_async,
	    (void (*) (void (*) (enum inferior_event_type, void*), void*))
	    tcomplain);
  de_fault (to_async_mask,
	    (int (*) (int))
	    return_one);
  de_fault (to_thread_architecture,
	    default_thread_architecture);
  /* to_read_description is never inherited nor defaulted; a NULL entry
     means "no description available".  */
  current_target.to_read_description = NULL;
  de_fault (to_get_ada_task_ptid,
	    (ptid_t (*) (long, long))
	    default_get_ada_task_ptid);
  de_fault (to_supports_multi_process,
	    (int (*) (void))
	    return_zero);
  de_fault (to_trace_init,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_download_tracepoint,
	    (void (*) (struct breakpoint *))
	    tcomplain);
  de_fault (to_download_trace_state_variable,
	    (void (*) (struct trace_state_variable *))
	    tcomplain);
  de_fault (to_trace_set_readonly_regions,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_start,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_get_trace_status,
	    (int (*) (struct trace_status *))
	    return_minus_one);
  de_fault (to_trace_stop,
	    (void (*) (void))
	    tcomplain);
  de_fault (to_trace_find,
	    (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
	    return_minus_one);
  de_fault (to_get_trace_state_variable_value,
	    (int (*) (int, LONGEST *))
	    return_zero);
  de_fault (to_save_trace_data,
	    (int (*) (const char *))
	    tcomplain);
  de_fault (to_upload_tracepoints,
	    (int (*) (struct uploaded_tp **))
	    return_zero);
  de_fault (to_upload_trace_state_variables,
	    (int (*) (struct uploaded_tsv **))
	    return_zero);
  de_fault (to_get_raw_trace_data,
	    (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
	    tcomplain);
  de_fault (to_set_disconnected_tracing,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_set_circular_trace_buffer,
	    (void (*) (int))
	    target_ignore);
  de_fault (to_get_tib_address,
	    (int (*) (ptid_t, CORE_ADDR *))
	    tcomplain);
  de_fault (to_set_permissions,
	    (void (*) (void))
	    target_ignore);
  de_fault (to_static_tracepoint_marker_at,
	    (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
	    return_zero);
  de_fault (to_static_tracepoint_markers_by_strid,
	    (VEC(static_tracepoint_marker_p) * (*) (const char *))
	    tcomplain);
  de_fault (to_traceframe_info,
	    (struct traceframe_info * (*) (void))
	    tcomplain);
#undef de_fault

  /* Finally, position the target-stack beneath the squashed
     "current_target".  That way code looking for a non-inherited
     target method can quickly and simply find it.  */
  current_target.beneath = target_stack;

  if (targetdebug)
    setup_target_debug ();
}
895
/* Push a new target type into the stack of the existing target accessors,
   possibly superseding some of the existing accessors.

   Rather than allow an empty stack, we always have the dummy target at
   the bottom stratum, so we can call the function vectors without
   checking them.  */

void
push_target (struct target_ops *t)
{
  struct target_ops **cur;

  /* Check magic number.  If wrong, it probably means someone changed
     the struct definition, but not all the places that initialize one.  */
  if (t->to_magic != OPS_MAGIC)
    {
      fprintf_unfiltered (gdb_stderr,
			  "Magic number of %s target struct wrong\n",
			  t->to_shortname);
      internal_error (__FILE__, __LINE__,
		      _("failed internal consistency check"));
    }

  /* Find the proper stratum to install this target in: the stack is
     kept sorted with higher strata nearer the top.  */
  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
	break;
    }

  /* If there's already targets at this stratum, remove them.  */
  /* FIXME: cagney/2003-10-15: I think this should be popping all
     targets to CUR, and not just those at this stratum level.  */
  while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
    {
      /* There's already something at this stratum level.  Close it,
	 and un-hook it from the stack.  */
      struct target_ops *tmp = (*cur);

      (*cur) = (*cur)->beneath;
      tmp->beneath = NULL;
      target_close (tmp, 0);
    }

  /* We have removed all targets in our stratum, now add the new one.  */
  t->beneath = (*cur);
  (*cur) = t;

  update_current_target ();
}
946
/* Remove a target_ops vector from the stack, wherever it may be.
   Closes the target as a side effect.  Return how many times it was
   removed (0 or 1).  */

int
unpush_target (struct target_ops *t)
{
  struct target_ops **cur;
  struct target_ops *tmp;

  if (t->to_stratum == dummy_stratum)
    internal_error (__FILE__, __LINE__,
		    _("Attempt to unpush the dummy target"));

  /* Look for the specified target.  Note that we assume that a target
     can only occur once in the target stack.  */

  for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
    {
      if ((*cur) == t)
	break;
    }

  if ((*cur) == NULL)
    return 0;			/* Didn't find target_ops, quit now.  */

  /* NOTE: cagney/2003-12-06: In '94 the close call was made
     unconditional by moving it to before the above check that the
     target was in the target stack (something about "Change the way
     pushing and popping of targets work to support target overlays
     and inheritance").  This doesn't make much sense - only open
     targets should be closed.  */
  target_close (t, 0);

  /* Unchain the target.  */
  tmp = (*cur);
  (*cur) = (*cur)->beneath;
  tmp->beneath = NULL;

  update_current_target ();

  return 1;
}
989
990 void
991 pop_target (void)
992 {
993 target_close (target_stack, 0); /* Let it clean up. */
994 if (unpush_target (target_stack) == 1)
995 return;
996
997 fprintf_unfiltered (gdb_stderr,
998 "pop_target couldn't find target %s\n",
999 current_target.to_shortname);
1000 internal_error (__FILE__, __LINE__,
1001 _("failed internal consistency check"));
1002 }
1003
/* Unpush (and close) every target whose stratum is strictly above
   ABOVE_STRATUM.  QUITTING is forwarded to target_close so targets
   can skip work that is pointless while GDB is exiting.  */

void
pop_all_targets_above (enum strata above_stratum, int quitting)
{
  while ((int) (current_target.to_stratum) > (int) above_stratum)
    {
      target_close (target_stack, quitting);
      /* NOTE(review): unpush_target closes the target a second time
	 (with quitting == 0), so to_close runs twice per target here;
	 worth confirming and restructuring so the close happens
	 exactly once without losing the QUITTING flag.  */
      if (!unpush_target (target_stack))
	{
	  fprintf_unfiltered (gdb_stderr,
			      "pop_all_targets couldn't find target %s\n",
			      target_stack->to_shortname);
	  internal_error (__FILE__, __LINE__,
			  _("failed internal consistency check"));
	  break;
	}
    }
}
1021
/* Unpush every target on the stack down to (but not including) the
   dummy target.  */

void
pop_all_targets (int quitting)
{
  pop_all_targets_above (dummy_stratum, quitting);
}
1027
1028 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1029
1030 int
1031 target_is_pushed (struct target_ops *t)
1032 {
1033 struct target_ops **cur;
1034
1035 /* Check magic number. If wrong, it probably means someone changed
1036 the struct definition, but not all the places that initialize one. */
1037 if (t->to_magic != OPS_MAGIC)
1038 {
1039 fprintf_unfiltered (gdb_stderr,
1040 "Magic number of %s target struct wrong\n",
1041 t->to_shortname);
1042 internal_error (__FILE__, __LINE__,
1043 _("failed internal consistency check"));
1044 }
1045
1046 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1047 if (*cur == t)
1048 return 1;
1049
1050 return 0;
1051 }
1052
/* Using the objfile specified in OBJFILE, find the address for the
   current thread's thread-local storage with offset OFFSET.

   Searches the target stack for a target implementing
   to_get_thread_local_address; also requires the architecture to
   provide fetch_tls_load_module_address.  On failure, translates the
   low-level TLS errors into user-facing messages; errors other than
   the known TLS ones are re-thrown to an outer handler.  */
CORE_ADDR
target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
{
  /* ADDR is volatile because it is written inside TRY_CATCH (which is
     setjmp/longjmp based) and read after it.  */
  volatile CORE_ADDR addr = 0;
  struct target_ops *target;

  /* Find the first target in the stack that knows how to translate a
     (load module, offset) pair into a TLS address.  */
  for (target = current_target.beneath;
       target != NULL;
       target = target->beneath)
    {
      if (target->to_get_thread_local_address != NULL)
	break;
    }

  if (target != NULL
      && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
    {
      ptid_t ptid = inferior_ptid;
      volatile struct gdb_exception ex;

      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  CORE_ADDR lm_addr;
	  
	  /* Fetch the load module address for this objfile.  */
	  lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
	                                                   objfile);
	  /* If it's 0, throw the appropriate exception.  */
	  if (lm_addr == 0)
	    throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
			 _("TLS load module not found"));

	  addr = target->to_get_thread_local_address (target, ptid,
						      lm_addr, offset);
	}
      /* If an error occurred, print TLS related messages here.  Otherwise,
         throw the error to some higher catcher.  */
      if (ex.reason < 0)
	{
	  int objfile_is_library = (objfile->flags & OBJF_SHARED);

	  switch (ex.error)
	    {
	    case TLS_NO_LIBRARY_SUPPORT_ERROR:
	      error (_("Cannot find thread-local variables "
		       "in this thread library."));
	      break;
	    case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find shared library `%s' in dynamic"
		         " linker's load module list"), objfile->name);
	      else
		error (_("Cannot find executable file `%s' in dynamic"
		         " linker's load module list"), objfile->name);
	      break;
	    case TLS_NOT_ALLOCATED_YET_ERROR:
	      if (objfile_is_library)
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the shared library `%s'\n"
		         "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      else
		error (_("The inferior has not yet allocated storage for"
		         " thread-local variables in\n"
		         "the executable `%s'\n"
		         "for %s"),
		       objfile->name, target_pid_to_str (ptid));
	      break;
	    case TLS_GENERIC_ERROR:
	      if (objfile_is_library)
		error (_("Cannot find thread-local storage for %s, "
		         "shared library %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      else
		error (_("Cannot find thread-local storage for %s, "
		         "executable file %s:\n%s"),
		       target_pid_to_str (ptid),
		       objfile->name, ex.message);
	      break;
	    default:
	      /* Not a TLS-specific error; let an outer handler deal
		 with it.  */
	      throw_exception (ex);
	      break;
	    }
	}
    }
  /* It wouldn't be wrong here to try a gdbarch method, too; finding
     TLS is an ABI-specific thing.  But we don't do that yet.  */
  else
    error (_("Cannot find thread-local variables on this target"));

  return addr;
}
1149
#undef	MIN
#define MIN(A, B) (((A) <= (B)) ? (A) : (B))

/* target_read_string -- read a null terminated string, up to LEN bytes,
   from MEMADDR in target.  Set *ERRNOP to the errno code, or 0 if successful.
   Set *STRING to a pointer to malloc'd memory containing the data; the caller
   is responsible for freeing it.  Return the number of bytes successfully
   read.  */

int
target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
{
  int tlen, origlen, offset, i;
  gdb_byte buf[4];		/* One aligned 4-byte chunk at a time.  */
  int errcode = 0;
  char *buffer;			/* Accumulated result, grown by doubling.  */
  int buffer_allocated;
  char *bufptr;			/* Next free slot in BUFFER.  */
  unsigned int nbytes_read = 0;

  gdb_assert (string);

  /* Small for testing.  */
  buffer_allocated = 4;
  buffer = xmalloc (buffer_allocated);
  bufptr = buffer;

  origlen = len;

  while (len > 0)
    {
      /* Read a 4-byte aligned chunk; TLEN/OFFSET select the part of
	 it that belongs to [MEMADDR, MEMADDR+LEN).  */
      tlen = MIN (len, 4 - (memaddr & 3));
      offset = memaddr & 3;

      errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
      if (errcode != 0)
	{
	  /* The transfer request might have crossed the boundary to an
	     unallocated region of memory.  Retry the transfer, requesting
	     a single byte.  */
	  tlen = 1;
	  offset = 0;
	  errcode = target_read_memory (memaddr, buf, 1);
	  if (errcode != 0)
	    goto done;
	}

      /* Grow the result buffer if this chunk would overflow it.  */
      if (bufptr - buffer + tlen > buffer_allocated)
	{
	  unsigned int bytes;

	  bytes = bufptr - buffer;
	  buffer_allocated *= 2;
	  buffer = xrealloc (buffer, buffer_allocated);
	  bufptr = buffer + bytes;
	}

      /* Copy bytes out of the chunk, stopping (and counting) the
	 terminating NUL if we hit one.  */
      for (i = 0; i < tlen; i++)
	{
	  *bufptr++ = buf[i + offset];
	  if (buf[i + offset] == '\000')
	    {
	      nbytes_read += i + 1;
	      goto done;
	    }
	}

      memaddr += tlen;
      len -= tlen;
      nbytes_read += tlen;
    }
 done:
  /* The caller owns BUFFER even on error; *ERRNOP (if given) reports
     the first failure.  */
  *string = buffer;
  if (errnop != NULL)
    *errnop = errcode;
  return nbytes_read;
}
1227
1228 struct target_section_table *
1229 target_get_section_table (struct target_ops *target)
1230 {
1231 struct target_ops *t;
1232
1233 if (targetdebug)
1234 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1235
1236 for (t = target; t != NULL; t = t->beneath)
1237 if (t->to_get_section_table != NULL)
1238 return (*t->to_get_section_table) (t);
1239
1240 return NULL;
1241 }
1242
1243 /* Find a section containing ADDR. */
1244
1245 struct target_section *
1246 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1247 {
1248 struct target_section_table *table = target_get_section_table (target);
1249 struct target_section *secp;
1250
1251 if (table == NULL)
1252 return NULL;
1253
1254 for (secp = table->sections; secp < table->sections_end; secp++)
1255 {
1256 if (addr >= secp->addr && addr < secp->endaddr)
1257 return secp;
1258 }
1259 return NULL;
1260 }
1261
1262 /* Read memory from the live target, even if currently inspecting a
1263 traceframe. The return is the same as that of target_read. */
1264
1265 static LONGEST
1266 target_read_live_memory (enum target_object object,
1267 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1268 {
1269 int ret;
1270 struct cleanup *cleanup;
1271
1272 /* Switch momentarily out of tfind mode so to access live memory.
1273 Note that this must not clear global state, such as the frame
1274 cache, which must still remain valid for the previous traceframe.
1275 We may be _building_ the frame cache at this point. */
1276 cleanup = make_cleanup_restore_traceframe_number ();
1277 set_traceframe_number (-1);
1278
1279 ret = target_read (current_target.beneath, object, NULL,
1280 myaddr, memaddr, len);
1281
1282 do_cleanups (cleanup);
1283 return ret;
1284 }
1285
/* Using the set of read-only target sections of OPS, read live
   read-only memory.  Note that the actual reads start from the
   top-most target again.

   For interface/parameters/return description see target.h,
   to_xfer_partial.  */

static LONGEST
memory_xfer_live_readonly_partial (struct target_ops *ops,
				   enum target_object object,
				   gdb_byte *readbuf, ULONGEST memaddr,
				   LONGEST len)
{
  struct target_section *secp;
  struct target_section_table *table;

  /* Only proceed when MEMADDR falls in a read-only section.  */
  secp = target_section_by_addr (ops, memaddr);
  if (secp != NULL
      && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	  & SEC_READONLY))
    {
      struct target_section *p;
      ULONGEST memend = memaddr + len;

      table = target_get_section_table (ops);

      /* Find the section containing the start of the transfer and
	 clamp the transfer to it if it straddles the section end.  */
      for (p = table->sections; p < table->sections_end; p++)
	{
	  if (memaddr >= p->addr)
	    {
	      if (memend <= p->endaddr)
		{
		  /* Entire transfer is within this section.  */
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	      else if (memaddr >= p->endaddr)
		{
		  /* This section ends before the transfer starts.  */
		  continue;
		}
	      else
		{
		  /* This section overlaps the transfer.  Just do half.  */
		  len = p->endaddr - memaddr;
		  return target_read_live_memory (object, memaddr,
						  readbuf, len);
		}
	    }
	}
    }

  /* Not in a read-only section (or no section found): nothing read.  */
  return 0;
}
1340
/* Perform a partial memory transfer.
   For docs see target.h, to_xfer_partial.

   The transfer is tried against several sources in order: unmapped
   overlay sections, trusted read-only executable sections, the
   traceframe's available memory (falling back to live read-only
   memory), the dcache, and finally each target in the stack.  */

static LONGEST
memory_xfer_partial (struct target_ops *ops, enum target_object object,
		     void *readbuf, const void *writebuf, ULONGEST memaddr,
		     LONGEST len)
{
  LONGEST res;
  int reg_len;			/* LEN clamped to the memory region.  */
  struct mem_region *region;
  struct inferior *inf;

  /* Zero length requests are ok and require no work.  */
  if (len == 0)
    return 0;

  /* For accesses to unmapped overlay sections, read directly from
     files.  Must do this first, as MEMADDR may need adjustment.  */
  if (readbuf != NULL && overlay_debugging)
    {
      struct obj_section *section = find_pc_overlay (memaddr);

      if (pc_in_unmapped_range (memaddr, section))
	{
	  struct target_section_table *table
	    = target_get_section_table (ops);
	  const char *section_name = section->the_bfd_section->name;

	  memaddr = overlay_mapped_address (memaddr, section);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    section_name);
	}
    }

  /* Try the executable files, if "trust-readonly-sections" is set.  */
  if (readbuf != NULL && trust_readonly)
    {
      struct target_section *secp;
      struct target_section_table *table;

      secp = target_section_by_addr (ops, memaddr);
      if (secp != NULL
	  && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
	      & SEC_READONLY))
	{
	  table = target_get_section_table (ops);
	  return section_table_xfer_memory_partial (readbuf, writebuf,
						    memaddr, len,
						    table->sections,
						    table->sections_end,
						    NULL);
	}
    }

  /* If reading unavailable memory in the context of traceframes, and
     this address falls within a read-only section, fallback to
     reading from live memory.  */
  if (readbuf != NULL && get_traceframe_number () != -1)
    {
      VEC(mem_range_s) *available;

      /* If we fail to get the set of available memory, then the
	 target does not support querying traceframe info, and so we
	 attempt reading from the traceframe anyway (assuming the
	 target implements the old QTro packet then).  */
      if (traceframe_available_memory (&available, memaddr, len))
	{
	  struct cleanup *old_chain;

	  old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);

	  if (VEC_empty (mem_range_s, available)
	      || VEC_index (mem_range_s, available, 0)->start != memaddr)
	    {
	      /* Don't read into the traceframe's available
		 memory.  */
	      if (!VEC_empty (mem_range_s, available))
		{
		  LONGEST oldlen = len;

		  len = VEC_index (mem_range_s, available, 0)->start - memaddr;
		  gdb_assert (len <= oldlen);
		}

	      do_cleanups (old_chain);

	      /* This goes through the topmost target again.  */
	      res = memory_xfer_live_readonly_partial (ops, object,
						       readbuf, memaddr, len);
	      if (res > 0)
		return res;

	      /* No use trying further, we know some memory starting
		 at MEMADDR isn't available.  */
	      return -1;
	    }

	  /* Don't try to read more than how much is available, in
	     case the target implements the deprecated QTro packet to
	     cater for older GDBs (the target's knowledge of read-only
	     sections may be outdated by now).  */
	  len = VEC_index (mem_range_s, available, 0)->length;

	  do_cleanups (old_chain);
	}
    }

  /* Try GDB's internal data cache.  */
  region = lookup_mem_region (memaddr);
  /* region->hi == 0 means there's no upper bound.  */
  if (memaddr + len < region->hi || region->hi == 0)
    reg_len = len;
  else
    reg_len = region->hi - memaddr;

  /* Enforce the region's access mode before touching any target.  */
  switch (region->attrib.mode)
    {
    case MEM_RO:
      if (writebuf != NULL)
	return -1;
      break;

    case MEM_WO:
      if (readbuf != NULL)
	return -1;
      break;

    case MEM_FLASH:
      /* We only support writing to flash during "load" for now.  */
      if (writebuf != NULL)
	error (_("Writing to flash memory forbidden in this context"));
      break;

    case MEM_NONE:
      return -1;
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
  else
    inf = NULL;

  if (inf != NULL
      /* The dcache reads whole cache lines; that doesn't play well
	 with reading from a trace buffer, because reading outside of
	 the collected memory range fails.  */
      && get_traceframe_number () == -1
      && (region->attrib.cache
	  || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
    {
      if (readbuf != NULL)
	res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
				  reg_len, 0);
      else
	/* FIXME drow/2006-08-09: If we're going to preserve const
	   correctness dcache_xfer_memory should take readbuf and
	   writebuf.  */
	res = dcache_xfer_memory (ops, target_dcache, memaddr,
				  (void *) writebuf,
				  reg_len, 1);
      if (res <= 0)
	return -1;
      else
	{
	  if (readbuf && !show_memory_breakpoints)
	    breakpoint_restore_shadows (readbuf, memaddr, reg_len);
	  return res;
	}
    }

  /* If none of those methods found the memory we wanted, fall back
     to a target partial transfer.  Normally a single call to
     to_xfer_partial is enough; if it doesn't recognize an object
     it will call the to_xfer_partial of the next target down.
     But for memory this won't do.  Memory is the only target
     object which can be read from more than one valid target.
     A core file, for instance, could have some of memory but
     delegate other bits to the target below it.  So, we must
     manually try all targets.  */

  do
    {
      res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				  readbuf, writebuf, memaddr, reg_len);
      if (res > 0)
	break;

      /* We want to continue past core files to executables, but not
	 past a running target's memory.  */
      if (ops->to_has_all_memory (ops))
	break;

      ops = ops->beneath;
    }
  while (ops != NULL);

  /* Hide breakpoint shadow bytes from the caller unless asked not to.  */
  if (res > 0 && readbuf != NULL && !show_memory_breakpoints)
    breakpoint_restore_shadows (readbuf, memaddr, reg_len);

  /* Make sure the cache gets updated no matter what - if we are writing
     to the stack.  Even if this write is not tagged as such, we still need
     to update the cache.  */

  if (res > 0
      && inf != NULL
      && writebuf != NULL
      && !region->attrib.cache
      && stack_cache_enabled_p
      && object != TARGET_OBJECT_STACK_MEMORY)
    {
      dcache_update (target_dcache, memaddr, (void *) writebuf, res);
    }

  /* If we still haven't got anything, return the last error.  We
     give up.  */
  return res;
}
1562
/* Cleanup callback: restore show_memory_breakpoints to the value in
   ARG (the old value, smuggled through the void* as a uintptr_t).  */

static void
restore_show_memory_breakpoints (void *arg)
{
  show_memory_breakpoints = (uintptr_t) arg;
}
1568
/* Set show_memory_breakpoints to SHOW, and return a cleanup that
   restores the previous value when run.  */

struct cleanup *
make_show_memory_breakpoints_cleanup (int show)
{
  int current = show_memory_breakpoints;

  show_memory_breakpoints = show;
  return make_cleanup (restore_show_memory_breakpoints,
		       (void *) (uintptr_t) current);
}
1578
/* For docs see target.h, to_xfer_partial.

   Central entry point for partial transfers: enforces the
   may_write_memory setting, routes memory objects through
   memory_xfer_partial, and optionally dumps the transfer when
   targetdebug is enabled.  */

static LONGEST
target_xfer_partial (struct target_ops *ops,
		     enum target_object object, const char *annex,
		     void *readbuf, const void *writebuf,
		     ULONGEST offset, LONGEST len)
{
  LONGEST retval;

  gdb_assert (ops->to_xfer_partial != NULL);

  if (writebuf && !may_write_memory)
    error (_("Writing to memory is not allowed (addr %s, len %s)"),
	   core_addr_to_string_nz (offset), plongest (len));

  /* If this is a memory transfer, let the memory-specific code
     have a look at it instead.  Memory transfers are more
     complicated.  */
  if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
    retval = memory_xfer_partial (ops, object, readbuf,
				  writebuf, offset, len);
  else
    {
      enum target_object raw_object = object;

      /* If this is a raw memory transfer, request the normal
	 memory object from other layers.  */
      if (raw_object == TARGET_OBJECT_RAW_MEMORY)
	raw_object = TARGET_OBJECT_MEMORY;

      retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
				     writebuf, offset, len);
    }

  if (targetdebug)
    {
      const unsigned char *myaddr = NULL;

      fprintf_unfiltered (gdb_stdlog,
			  "%s:target_xfer_partial "
			  "(%d, %s, %s, %s, %s, %s) = %s",
			  ops->to_shortname,
			  (int) object,
			  (annex ? annex : "(null)"),
			  host_address_to_string (readbuf),
			  host_address_to_string (writebuf),
			  core_addr_to_string_nz (offset),
			  plongest (len), plongest (retval));

      if (readbuf)
	myaddr = readbuf;
      if (writebuf)
	myaddr = writebuf;
      if (retval > 0 && myaddr != NULL)
	{
	  int i;

	  /* Dump the transferred bytes, 16 per line; unless
	     targetdebug >= 2, stop after the first line.  */
	  fputs_unfiltered (", bytes =", gdb_stdlog);
	  for (i = 0; i < retval; i++)
	    {
	      if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
		{
		  if (targetdebug < 2 && i > 0)
		    {
		      fprintf_unfiltered (gdb_stdlog, " ...");
		      break;
		    }
		  fprintf_unfiltered (gdb_stdlog, "\n");
		}

	      fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	    }
	}

      fputc_unfiltered ('\n', gdb_stdlog);
    }
  return retval;
}
1658
1659 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1660 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1661 if any error occurs.
1662
1663 If an error occurs, no guarantee is made about the contents of the data at
1664 MYADDR. In particular, the caller should not depend upon partial reads
1665 filling the buffer with good data. There is no way for the caller to know
1666 how much good data might have been transfered anyway. Callers that can
1667 deal with partial reads should call target_read (which will retry until
1668 it makes no progress, and then return how much was transferred). */
1669
1670 int
1671 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1672 {
1673 /* Dispatch to the topmost target, not the flattened current_target.
1674 Memory accesses check target->to_has_(all_)memory, and the
1675 flattened target doesn't inherit those. */
1676 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1677 myaddr, memaddr, len) == len)
1678 return 0;
1679 else
1680 return EIO;
1681 }
1682
1683 /* Like target_read_memory, but specify explicitly that this is a read from
1684 the target's stack. This may trigger different cache behavior. */
1685
1686 int
1687 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1688 {
1689 /* Dispatch to the topmost target, not the flattened current_target.
1690 Memory accesses check target->to_has_(all_)memory, and the
1691 flattened target doesn't inherit those. */
1692
1693 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1694 myaddr, memaddr, len) == len)
1695 return 0;
1696 else
1697 return EIO;
1698 }
1699
1700 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1701 Returns either 0 for success or an errno value if any error occurs.
1702 If an error occurs, no guarantee is made about how much data got written.
1703 Callers that can deal with partial writes should call target_write. */
1704
1705 int
1706 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1707 {
1708 /* Dispatch to the topmost target, not the flattened current_target.
1709 Memory accesses check target->to_has_(all_)memory, and the
1710 flattened target doesn't inherit those. */
1711 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1712 myaddr, memaddr, len) == len)
1713 return 0;
1714 else
1715 return EIO;
1716 }
1717
/* Fetch the target's memory map.

   Asks the first target in the stack that implements to_memory_map.
   The returned vector is sorted by address, each region is assigned a
   number for the "mem" commands, and NULL is returned if no target
   supplies a map, the map is empty, or regions overlap.  The caller
   owns the returned vector.  */

VEC(mem_region_s) *
target_memory_map (void)
{
  VEC(mem_region_s) *result;
  struct mem_region *last_one, *this_one;
  int ix;
  struct target_ops *t;

  if (targetdebug)
    fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    if (t->to_memory_map != NULL)
      break;

  if (t == NULL)
    return NULL;

  result = t->to_memory_map (t);
  if (result == NULL)
    return NULL;

  qsort (VEC_address (mem_region_s, result),
	 VEC_length (mem_region_s, result),
	 sizeof (struct mem_region), mem_region_cmp);

  /* Check that regions do not overlap.  Simultaneously assign
     a numbering for the "mem" commands to use to refer to
     each region.  */
  last_one = NULL;
  for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
    {
      this_one->number = ix;

      if (last_one && last_one->hi > this_one->lo)
	{
	  warning (_("Overlapping regions in memory map: ignoring"));
	  VEC_free (mem_region_s, result);
	  return NULL;
	}
      last_one = this_one;
    }

  return result;
}
1765
1766 void
1767 target_flash_erase (ULONGEST address, LONGEST length)
1768 {
1769 struct target_ops *t;
1770
1771 for (t = current_target.beneath; t != NULL; t = t->beneath)
1772 if (t->to_flash_erase != NULL)
1773 {
1774 if (targetdebug)
1775 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1776 hex_string (address), phex (length, 0));
1777 t->to_flash_erase (t, address, length);
1778 return;
1779 }
1780
1781 tcomplain ();
1782 }
1783
1784 void
1785 target_flash_done (void)
1786 {
1787 struct target_ops *t;
1788
1789 for (t = current_target.beneath; t != NULL; t = t->beneath)
1790 if (t->to_flash_done != NULL)
1791 {
1792 if (targetdebug)
1793 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1794 t->to_flash_done (t);
1795 return;
1796 }
1797
1798 tcomplain ();
1799 }
1800
/* "show" callback for the "trust-readonly-sections" setting; VALUE is
   the current setting rendered as a string.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1809
/* More generic transfers.  */

/* Default to_xfer_partial implementation: bridge memory requests to a
   target's old-style deprecated_xfer_memory method if it has one,
   otherwise delegate down the stack.  */

static LONGEST
default_xfer_partial (struct target_ops *ops, enum target_object object,
		      const char *annex, gdb_byte *readbuf,
		      const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  if (object == TARGET_OBJECT_MEMORY
      && ops->deprecated_xfer_memory != NULL)
    /* If available, fall back to the target's
       "deprecated_xfer_memory" method.  */
    {
      int xfered = -1;

      errno = 0;
      if (writebuf != NULL)
	{
	  /* deprecated_xfer_memory takes a non-const buffer, so copy
	     the write data into a scratch buffer first.  */
	  void *buffer = xmalloc (len);
	  struct cleanup *cleanup = make_cleanup (xfree, buffer);

	  memcpy (buffer, writebuf, len);
	  xfered = ops->deprecated_xfer_memory (offset, buffer, len,
						1/*write*/, NULL, ops);
	  do_cleanups (cleanup);
	}
      if (readbuf != NULL)
	xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
					      0/*read*/, NULL, ops);
      if (xfered > 0)
	return xfered;
      else if (xfered == 0 && errno == 0)
	/* "deprecated_xfer_memory" uses 0, cross checked against
	   ERRNO as one indication of an error.  */
	return 0;
      else
	return -1;
    }
  else if (ops->beneath != NULL)
    return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
					  readbuf, writebuf, offset, len);
  else
    return -1;
}
1853
1854 /* The xfer_partial handler for the topmost target. Unlike the default,
1855 it does not need to handle memory specially; it just passes all
1856 requests down the stack. */
1857
1858 static LONGEST
1859 current_xfer_partial (struct target_ops *ops, enum target_object object,
1860 const char *annex, gdb_byte *readbuf,
1861 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1862 {
1863 if (ops->beneath != NULL)
1864 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1865 readbuf, writebuf, offset, len);
1866 else
1867 return -1;
1868 }
1869
/* Target vector read/write partial wrapper functions.  */

/* Read-only convenience wrapper around target_xfer_partial.  */

static LONGEST
target_read_partial (struct target_ops *ops,
		     enum target_object object,
		     const char *annex, gdb_byte *buf,
		     ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
}
1880
/* Write-only convenience wrapper around target_xfer_partial.  */

static LONGEST
target_write_partial (struct target_ops *ops,
		      enum target_object object,
		      const char *annex, const gdb_byte *buf,
		      ULONGEST offset, LONGEST len)
{
  return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
}
1889
1890 /* Wrappers to perform the full transfer. */
1891
1892 /* For docs on target_read see target.h. */
1893
1894 LONGEST
1895 target_read (struct target_ops *ops,
1896 enum target_object object,
1897 const char *annex, gdb_byte *buf,
1898 ULONGEST offset, LONGEST len)
1899 {
1900 LONGEST xfered = 0;
1901
1902 while (xfered < len)
1903 {
1904 LONGEST xfer = target_read_partial (ops, object, annex,
1905 (gdb_byte *) buf + xfered,
1906 offset + xfered, len - xfered);
1907
1908 /* Call an observer, notifying them of the xfer progress? */
1909 if (xfer == 0)
1910 return xfered;
1911 if (xfer < 0)
1912 return -1;
1913 xfered += xfer;
1914 QUIT;
1915 }
1916 return len;
1917 }
1918
/* Assuming that the entire [begin, end) range of memory cannot be
   read, try to read whatever subrange is possible to read.

   The function returns, in RESULT, either zero or one memory block.
   If there's a readable subrange at the beginning, it is completely
   read and returned.  Any further readable subrange will not be read.
   Otherwise, if there's a readable subrange at the end, it will be
   completely read and returned.  Any readable subranges before it
   (obviously, not starting at the beginning), will be ignored.  In
   other cases -- either no readable subrange, or readable subrange(s)
   that is neither at the beginning nor the end, nothing is returned.

   The purpose of this function is to handle a read across a boundary
   of accessible memory in a case when memory map is not available.
   The above restrictions are fine for this case, but will give
   incorrect results if the memory is 'patchy'.  However, supporting
   'patchy' memory would require trying to read every single byte,
   and that seems an unacceptable solution.  Explicit memory map is
   recommended for this case -- and target_read_memory_robust will
   take care of reading multiple ranges then.  */

static void
read_whatever_is_readable (struct target_ops *ops,
			   ULONGEST begin, ULONGEST end,
			   VEC(memory_read_result_s) **result)
{
  gdb_byte *buf = xmalloc (end - begin);
  ULONGEST current_begin = begin;
  ULONGEST current_end = end;
  int forward;
  memory_read_result_s r;

  /* If we previously failed to read 1 byte, nothing can be done here.  */
  if (end - begin <= 1)
    {
      xfree (buf);
      return;
    }

  /* Check that either first or the last byte is readable, and give up
     if not.  This heuristic is meant to permit reading accessible memory
     at the boundary of accessible region.  */
  if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
			   buf, begin, 1) == 1)
    {
      forward = 1;
      ++current_begin;
    }
  else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
				buf + (end-begin) - 1, end - 1, 1) == 1)
    {
      forward = 0;
      --current_end;
    }
  else
    {
      xfree (buf);
      return;
    }

  /* Loop invariant is that the [current_begin, current_end) was previously
     found to be not readable as a whole.

     Note loop condition -- if the range has 1 byte, we can't divide the range
     so there's no point trying further.  */
  while (current_end - current_begin > 1)
    {
      ULONGEST first_half_begin, first_half_end;
      ULONGEST second_half_begin, second_half_end;
      LONGEST xfer;
      ULONGEST middle = current_begin + (current_end - current_begin)/2;

      /* "First half" is the half adjacent to the already-read byte;
	 when scanning backward the halves are mirrored.  */
      if (forward)
	{
	  first_half_begin = current_begin;
	  first_half_end = middle;
	  second_half_begin = middle;
	  second_half_end = current_end;
	}
      else
	{
	  first_half_begin = middle;
	  first_half_end = current_end;
	  second_half_begin = current_begin;
	  second_half_end = middle;
	}

      xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			  buf + (first_half_begin - begin),
			  first_half_begin,
			  first_half_end - first_half_begin);

      if (xfer == first_half_end - first_half_begin)
	{
	  /* This half reads up fine.  So, the error must be in the
	     other half.  */
	  current_begin = second_half_begin;
	  current_end = second_half_end;
	}
      else
	{
	  /* This half is not readable.  Because we've tried one byte, we
	     know some part of this half is actually readable.  Go to the
	     next iteration to divide again and try to read.

	     We don't handle the other half, because this function only tries
	     to read a single readable subrange.  */
	  current_begin = first_half_begin;
	  current_end = first_half_end;
	}
    }

  if (forward)
    {
      /* The [begin, current_begin) range has been read.  */
      r.begin = begin;
      r.end = current_begin;
      r.data = buf;
    }
  else
    {
      /* The [current_end, end) range has been read.  Copy it into a
	 right-sized block so BUF itself can be freed.  */
      LONGEST rlen = end - current_end;

      r.data = xmalloc (rlen);
      memcpy (r.data, buf + current_end - begin, rlen);
      r.begin = current_end;
      r.end = end;
      xfree (buf);
    }
  VEC_safe_push(memory_read_result_s, (*result), &r);
}
2051
2052 void
2053 free_memory_read_result_vector (void *x)
2054 {
2055 VEC(memory_read_result_s) *v = x;
2056 memory_read_result_s *current;
2057 int ix;
2058
2059 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2060 {
2061 xfree (current->data);
2062 }
2063 VEC_free (memory_read_result_s, v);
2064 }
2065
2066 VEC(memory_read_result_s) *
2067 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2068 {
2069 VEC(memory_read_result_s) *result = 0;
2070
2071 LONGEST xfered = 0;
2072 while (xfered < len)
2073 {
2074 struct mem_region *region = lookup_mem_region (offset + xfered);
2075 LONGEST rlen;
2076
2077 /* If there is no explicit region, a fake one should be created. */
2078 gdb_assert (region);
2079
2080 if (region->hi == 0)
2081 rlen = len - xfered;
2082 else
2083 rlen = region->hi - offset;
2084
2085 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2086 {
2087 /* Cannot read this region. Note that we can end up here only
2088 if the region is explicitly marked inaccessible, or
2089 'inaccessible-by-default' is in effect. */
2090 xfered += rlen;
2091 }
2092 else
2093 {
2094 LONGEST to_read = min (len - xfered, rlen);
2095 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2096
2097 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2098 (gdb_byte *) buffer,
2099 offset + xfered, to_read);
2100 /* Call an observer, notifying them of the xfer progress? */
2101 if (xfer <= 0)
2102 {
2103 /* Got an error reading full chunk. See if maybe we can read
2104 some subrange. */
2105 xfree (buffer);
2106 read_whatever_is_readable (ops, offset + xfered,
2107 offset + xfered + to_read, &result);
2108 xfered += to_read;
2109 }
2110 else
2111 {
2112 struct memory_read_result r;
2113 r.data = buffer;
2114 r.begin = offset + xfered;
2115 r.end = r.begin + xfer;
2116 VEC_safe_push (memory_read_result_s, result, &r);
2117 xfered += xfer;
2118 }
2119 QUIT;
2120 }
2121 }
2122 return result;
2123 }
2124
2125
/* An alternative to target_write with progress callbacks.

   Writes LEN bytes from BUF to OBJECT/ANNEX at OFFSET via OPS,
   invoking PROGRESS (with BATON) once before the first write and once
   after each successful partial write.  Returns LEN on full success,
   the partial count if the target stops accepting data, or -1 on
   error.  */

LONGEST
target_write_with_progress (struct target_ops *ops,
			    enum target_object object,
			    const char *annex, const gdb_byte *buf,
			    ULONGEST offset, LONGEST len,
			    void (*progress) (ULONGEST, void *), void *baton)
{
  LONGEST xfered = 0;

  /* Give the progress callback a chance to set up.  */
  if (progress)
    (*progress) (0, baton);

  while (xfered < len)
    {
      LONGEST xfer = target_write_partial (ops, object, annex,
					   (gdb_byte *) buf + xfered,
					   offset + xfered, len - xfered);

      if (xfer == 0)
	return xfered;
      if (xfer < 0)
	return -1;

      if (progress)
	(*progress) (xfer, baton);

      xfered += xfer;
      QUIT;
    }
  return len;
}
2160
/* For docs on target_write see target.h.  */

LONGEST
target_write (struct target_ops *ops,
	      enum target_object object,
	      const char *annex, const gdb_byte *buf,
	      ULONGEST offset, LONGEST len)
{
  /* Same as target_write_with_progress, with no progress callback.  */
  return target_write_with_progress (ops, object, annex, buf, offset, len,
				     NULL, NULL);
}
2172
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  PADDING additional bytes are
   available in *BUF_P.  This is a helper function for
   target_read_alloc; see the declaration of that function for more
   information.  */

static LONGEST
target_read_alloc_1 (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte **buf_p, int padding)
{
  /* BUF_ALLOC is the capacity of BUF; BUF_POS is how much of it is
     filled with transferred data so far.  */
  size_t buf_alloc, buf_pos;
  gdb_byte *buf;
  LONGEST n;

  /* This function does not have a length parameter; it reads the
     entire OBJECT).  Also, it doesn't support objects fetched partly
     from one target and partly from another (in a different stratum,
     e.g. a core file and an executable).  Both reasons make it
     unsuitable for reading memory.  */
  gdb_assert (object != TARGET_OBJECT_MEMORY);

  /* Start by reading up to 4K at a time.  The target will throttle
     this number down if necessary.  */
  buf_alloc = 4096;
  buf = xmalloc (buf_alloc);
  buf_pos = 0;
  while (1)
    {
      /* Always leave PADDING bytes of headroom at the end of BUF so
	 the caller can append to the result (e.g. a NUL terminator).  */
      n = target_read_partial (ops, object, annex, &buf[buf_pos],
			       buf_pos, buf_alloc - buf_pos - padding);
      if (n < 0)
	{
	  /* An error occurred.  */
	  xfree (buf);
	  return -1;
	}
      else if (n == 0)
	{
	  /* Read all there was.  */
	  /* NOTE(review): on a zero-length result BUF is freed and
	     *BUF_P is left unset; callers must not inspect *BUF_P
	     when the return value is 0.  */
	  if (buf_pos == 0)
	    xfree (buf);
	  else
	    *buf_p = buf;
	  return buf_pos;
	}

      buf_pos += n;

      /* If the buffer is filling up, expand it.  */
      if (buf_alloc < buf_pos * 2)
	{
	  buf_alloc *= 2;
	  buf = xrealloc (buf, buf_alloc);
	}

      QUIT;
    }
}
2231
/* Read OBJECT/ANNEX using OPS.  Store the result in *BUF_P and return
   the size of the transferred data.  See the declaration in "target.h"
   for more information about the return value.  */

LONGEST
target_read_alloc (struct target_ops *ops, enum target_object object,
		   const char *annex, gdb_byte **buf_p)
{
  /* No padding is requested; the caller gets exactly the data read.  */
  return target_read_alloc_1 (ops, object, annex, buf_p, 0);
}
2242
2243 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2244 returned as a string, allocated using xmalloc. If an error occurs
2245 or the transfer is unsupported, NULL is returned. Empty objects
2246 are returned as allocated but empty strings. A warning is issued
2247 if the result contains any embedded NUL bytes. */
2248
2249 char *
2250 target_read_stralloc (struct target_ops *ops, enum target_object object,
2251 const char *annex)
2252 {
2253 gdb_byte *buffer;
2254 LONGEST transferred;
2255
2256 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2257
2258 if (transferred < 0)
2259 return NULL;
2260
2261 if (transferred == 0)
2262 return xstrdup ("");
2263
2264 buffer[transferred] = 0;
2265 if (strlen (buffer) < transferred)
2266 warning (_("target object %d, annex %s, "
2267 "contained unexpected null characters"),
2268 (int) object, annex ? annex : "(none)");
2269
2270 return (char *) buffer;
2271 }
2272
2273 /* Memory transfer methods. */
2274
2275 void
2276 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2277 LONGEST len)
2278 {
2279 /* This method is used to read from an alternate, non-current
2280 target. This read must bypass the overlay support (as symbols
2281 don't match this target), and GDB's internal cache (wrong cache
2282 for this target). */
2283 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2284 != len)
2285 memory_error (EIO, addr);
2286 }
2287
2288 ULONGEST
2289 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2290 int len, enum bfd_endian byte_order)
2291 {
2292 gdb_byte buf[sizeof (ULONGEST)];
2293
2294 gdb_assert (len <= sizeof (buf));
2295 get_target_memory (ops, addr, buf, len);
2296 return extract_unsigned_integer (buf, len, byte_order);
2297 }
2298
2299 int
2300 target_insert_breakpoint (struct gdbarch *gdbarch,
2301 struct bp_target_info *bp_tgt)
2302 {
2303 if (!may_insert_breakpoints)
2304 {
2305 warning (_("May not insert breakpoints"));
2306 return 1;
2307 }
2308
2309 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2310 }
2311
2312 int
2313 target_remove_breakpoint (struct gdbarch *gdbarch,
2314 struct bp_target_info *bp_tgt)
2315 {
2316 /* This is kind of a weird case to handle, but the permission might
2317 have been changed after breakpoints were inserted - in which case
2318 we should just take the user literally and assume that any
2319 breakpoints should be left in place. */
2320 if (!may_insert_breakpoints)
2321 {
2322 warning (_("May not remove breakpoints"));
2323 return 1;
2324 }
2325
2326 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2327 }
2328
/* Print the symbol file, then each target on the stack that provides
   memory, from the top down.  Presumably bound to an "info" command;
   the registration is not in this part of the file.  */

static void
target_info (char *args, int from_tty)
{
  struct target_ops *t;
  int has_all_mem = 0;

  if (symfile_objfile != NULL)
    printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* Skip targets that provide no memory at all.  */
      if (!(*t->to_has_memory) (t))
	continue;

      /* Skip the dummy target (and anything at or below its stratum).  */
      if ((int) (t->to_stratum) <= (int) dummy_stratum)
	continue;
      /* If a target above provided all memory, the ones beneath it
	 are shadowed while it is running.  */
      if (has_all_mem)
	printf_unfiltered (_("\tWhile running this, "
			     "GDB does not access memory from...\n"));
      printf_unfiltered ("%s:\n", t->to_longname);
      (t->to_files_info) (t);
      has_all_mem = (*t->to_has_all_memory) (t);
    }
}
2353
/* This function is called before any new inferior is created, e.g.
   by running a program, attaching, or connecting to a target.
   It cleans up any state from previous invocations which might
   change between runs.  This is a subset of what target_preopen
   resets (things which might change between targets).  */

void
target_pre_inferior (int from_tty)
{
  /* Clear out solib state.  Otherwise the solib state of the previous
     inferior might have survived and is entirely wrong for the new
     target.  This has been observed on GNU/Linux using glibc 2.3.  How
     to reproduce:

     bash$ ./foo&
     [1] 4711
     bash$ ./foo&
     [1] 4712
     bash$ gdb ./foo
     [...]
     (gdb) attach 4711
     (gdb) detach
     (gdb) attach 4712
     Cannot access memory at address 0xdeadbeef
  */

  /* In some OSs, the shared library list is the same/global/shared
     across inferiors.  If code is shared between processes, so are
     memory regions and features.  In that case the per-inferior
     caches below must be left alone.  */
  if (!gdbarch_has_global_solist (target_gdbarch))
    {
      no_shared_libraries (NULL, from_tty);

      invalidate_target_mem_regions ();

      target_clear_description ();
    }
}
2392
2393 /* Callback for iterate_over_inferiors. Gets rid of the given
2394 inferior. */
2395
2396 static int
2397 dispose_inferior (struct inferior *inf, void *args)
2398 {
2399 struct thread_info *thread;
2400
2401 thread = any_thread_of_process (inf->pid);
2402 if (thread)
2403 {
2404 switch_to_thread (thread->ptid);
2405
2406 /* Core inferiors actually should be detached, not killed. */
2407 if (target_has_execution)
2408 target_kill ();
2409 else
2410 target_detach (NULL, 0);
2411 }
2412
2413 return 0;
2414 }
2415
/* This is to be called by the open routine before it does
   anything.  */

void
target_preopen (int from_tty)
{
  dont_repeat ();

  /* Dispose of existing inferiors first.  Only ask the user when
     interactive and something is actually live.  */
  if (have_inferiors ())
    {
      if (!from_tty
	  || !have_live_inferiors ()
	  || query (_("A program is being debugged already. Kill it? ")))
	iterate_over_inferiors (dispose_inferior, NULL);
      else
	error (_("Program not killed."));
    }

  /* Calling target_kill may remove the target from the stack.  But if
     it doesn't (which seems like a win for UDI), remove it now.  */
  /* Leave the exec target, though.  The user may be switching from a
     live process to a core of the same program.  */
  pop_all_targets_above (file_stratum, 0);

  target_pre_inferior (from_tty);
}
2442
2443 /* Detach a target after doing deferred register stores. */
2444
2445 void
2446 target_detach (char *args, int from_tty)
2447 {
2448 struct target_ops* t;
2449
2450 if (gdbarch_has_global_breakpoints (target_gdbarch))
2451 /* Don't remove global breakpoints here. They're removed on
2452 disconnection from the target. */
2453 ;
2454 else
2455 /* If we're in breakpoints-always-inserted mode, have to remove
2456 them before detaching. */
2457 remove_breakpoints_pid (PIDGET (inferior_ptid));
2458
2459 prepare_for_detach ();
2460
2461 for (t = current_target.beneath; t != NULL; t = t->beneath)
2462 {
2463 if (t->to_detach != NULL)
2464 {
2465 t->to_detach (t, args, from_tty);
2466 if (targetdebug)
2467 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2468 args, from_tty);
2469 return;
2470 }
2471 }
2472
2473 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2474 }
2475
2476 void
2477 target_disconnect (char *args, int from_tty)
2478 {
2479 struct target_ops *t;
2480
2481 /* If we're in breakpoints-always-inserted mode or if breakpoints
2482 are global across processes, we have to remove them before
2483 disconnecting. */
2484 remove_breakpoints ();
2485
2486 for (t = current_target.beneath; t != NULL; t = t->beneath)
2487 if (t->to_disconnect != NULL)
2488 {
2489 if (targetdebug)
2490 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2491 args, from_tty);
2492 t->to_disconnect (t, args, from_tty);
2493 return;
2494 }
2495
2496 tcomplain ();
2497 }
2498
/* Wait for an event from the inferior matching PTID, storing it in
   *STATUS.  Delegates to the first target on the stack implementing
   to_wait; calls noprocess (which does not return) when none does.  */

ptid_t
target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_wait != NULL)
	{
	  ptid_t retval = (*t->to_wait) (t, ptid, status, options);

	  if (targetdebug)
	    {
	      char *status_string;

	      /* target_waitstatus_to_string returns malloc'd storage;
		 free it once logged.  */
	      status_string = target_waitstatus_to_string (status);
	      fprintf_unfiltered (gdb_stdlog,
				  "target_wait (%d, status) = %d, %s\n",
				  PIDGET (ptid), PIDGET (retval),
				  status_string);
	      xfree (status_string);
	    }

	  return retval;
	}
    }

  noprocess ();
}
2528
2529 char *
2530 target_pid_to_str (ptid_t ptid)
2531 {
2532 struct target_ops *t;
2533
2534 for (t = current_target.beneath; t != NULL; t = t->beneath)
2535 {
2536 if (t->to_pid_to_str != NULL)
2537 return (*t->to_pid_to_str) (t, ptid);
2538 }
2539
2540 return normal_pid_to_str (ptid);
2541 }
2542
2543 char *
2544 target_thread_name (struct thread_info *info)
2545 {
2546 struct target_ops *t;
2547
2548 for (t = current_target.beneath; t != NULL; t = t->beneath)
2549 {
2550 if (t->to_thread_name != NULL)
2551 return (*t->to_thread_name) (info);
2552 }
2553
2554 return NULL;
2555 }
2556
/* Resume PTID, single-stepping if STEP, delivering SIGNAL.  Delegates
   to the first target on the stack implementing to_resume; calls
   noprocess when none does.  */

void
target_resume (ptid_t ptid, int step, enum target_signal signal)
{
  struct target_ops *t;

  /* The inferior is about to run; cached memory contents are about to
     become stale.  */
  target_dcache_invalidate ();

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_resume != NULL)
	{
	  t->to_resume (t, ptid, step, signal);
	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
				PIDGET (ptid),
				step ? "step" : "continue",
				target_signal_to_name (signal));

	  /* Post-resume bookkeeping: register contents are stale, the
	     threads are now executing/running, and their inline-frame
	     state no longer applies.  */
	  registers_changed_ptid (ptid);
	  set_executing (ptid, 1);
	  set_running (ptid, 1);
	  clear_inline_frame_state (ptid);
	  return;
	}
    }

  noprocess ();
}
2585
/* Hand the PASS_SIGNALS array (NUMSIGS entries, non-zero meaning the
   signal is to be passed -- see target.h) to the first target on the
   stack implementing to_pass_signals.  Silently a no-op when none
   does.  */

void
target_pass_signals (int numsigs, unsigned char *pass_signals)
{
  struct target_ops *t;

  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_pass_signals != NULL)
	{
	  if (targetdebug)
	    {
	      int i;

	      /* Log the names of the signals flagged for passing.  */
	      fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
				  numsigs);

	      for (i = 0; i < numsigs; i++)
		if (pass_signals[i])
		  fprintf_unfiltered (gdb_stdlog, " %s",
				      target_signal_to_name (i));

	      fprintf_unfiltered (gdb_stdlog, " })\n");
	    }

	  (*t->to_pass_signals) (numsigs, pass_signals);
	  return;
	}
    }
}
2615
2616 /* Look through the list of possible targets for a target that can
2617 follow forks. */
2618
2619 int
2620 target_follow_fork (int follow_child)
2621 {
2622 struct target_ops *t;
2623
2624 for (t = current_target.beneath; t != NULL; t = t->beneath)
2625 {
2626 if (t->to_follow_fork != NULL)
2627 {
2628 int retval = t->to_follow_fork (t, follow_child);
2629
2630 if (targetdebug)
2631 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2632 follow_child, retval);
2633 return retval;
2634 }
2635 }
2636
2637 /* Some target returned a fork event, but did not know how to follow it. */
2638 internal_error (__FILE__, __LINE__,
2639 _("could not find a target to follow fork"));
2640 }
2641
2642 void
2643 target_mourn_inferior (void)
2644 {
2645 struct target_ops *t;
2646
2647 for (t = current_target.beneath; t != NULL; t = t->beneath)
2648 {
2649 if (t->to_mourn_inferior != NULL)
2650 {
2651 t->to_mourn_inferior (t);
2652 if (targetdebug)
2653 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2654
2655 /* We no longer need to keep handles on any of the object files.
2656 Make sure to release them to avoid unnecessarily locking any
2657 of them while we're not actually debugging. */
2658 bfd_cache_close_all ();
2659
2660 return;
2661 }
2662 }
2663
2664 internal_error (__FILE__, __LINE__,
2665 _("could not find a target to follow mourn inferior"));
2666 }
2667
2668 /* Look for a target which can describe architectural features, starting
2669 from TARGET. If we find one, return its description. */
2670
2671 const struct target_desc *
2672 target_read_description (struct target_ops *target)
2673 {
2674 struct target_ops *t;
2675
2676 for (t = target; t != NULL; t = t->beneath)
2677 if (t->to_read_description != NULL)
2678 {
2679 const struct target_desc *tdesc;
2680
2681 tdesc = t->to_read_description (t);
2682 if (tdesc)
2683 return tdesc;
2684 }
2685
2686 return NULL;
2687 }
2688
/* The default implementation of to_search_memory.
   This implements a basic search of memory, reading target memory and
   performing the search here (as opposed to performing the search on
   the target side with, for example, gdbserver).  */

int
simple_search_memory (struct target_ops *ops,
		      CORE_ADDR start_addr, ULONGEST search_space_len,
		      const gdb_byte *pattern, ULONGEST pattern_len,
		      CORE_ADDR *found_addrp)
{
  /* NOTE: also defined in find.c testcase.  */
#define SEARCH_CHUNK_SIZE 16000
  const unsigned chunk_size = SEARCH_CHUNK_SIZE;
  /* Buffer to hold memory contents for searching.  */
  gdb_byte *search_buf;
  unsigned search_buf_size;
  struct cleanup *old_cleanups;

  /* One chunk plus enough extra for a match straddling a chunk
     boundary.  */
  search_buf_size = chunk_size + pattern_len - 1;

  /* No point in trying to allocate a buffer larger than the search space.  */
  if (search_space_len < search_buf_size)
    search_buf_size = search_space_len;

  search_buf = malloc (search_buf_size);
  if (search_buf == NULL)
    error (_("Unable to allocate memory to perform the search."));
  old_cleanups = make_cleanup (free_current_contents, &search_buf);

  /* Prime the search buffer.  */

  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
		   search_buf, start_addr, search_buf_size) != search_buf_size)
    {
      warning (_("Unable to access target memory at %s, halting search."),
	       hex_string (start_addr));
      do_cleanups (old_cleanups);
      return -1;
    }

  /* Perform the search.

     The loop is kept simple by allocating [N + pattern-length - 1] bytes.
     When we've scanned N bytes we copy the trailing bytes to the start and
     read in another N bytes.  */

  while (search_space_len >= pattern_len)
    {
      gdb_byte *found_ptr;
      unsigned nr_search_bytes = min (search_space_len, search_buf_size);

      found_ptr = memmem (search_buf, nr_search_bytes,
			  pattern, pattern_len);

      if (found_ptr != NULL)
	{
	  /* Translate the buffer offset back to a target address.  */
	  CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);

	  *found_addrp = found_addr;
	  do_cleanups (old_cleanups);
	  return 1;
	}

      /* Not found in this chunk, skip to next chunk.  */

      /* Don't let search_space_len wrap here, it's unsigned.  */
      if (search_space_len >= chunk_size)
	search_space_len -= chunk_size;
      else
	search_space_len = 0;

      if (search_space_len >= pattern_len)
	{
	  unsigned keep_len = search_buf_size - chunk_size;
	  CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
	  int nr_to_read;

	  /* Copy the trailing part of the previous iteration to the front
	     of the buffer for the next iteration.  */
	  gdb_assert (keep_len == pattern_len - 1);
	  memcpy (search_buf, search_buf + chunk_size, keep_len);

	  nr_to_read = min (search_space_len - keep_len, chunk_size);

	  if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
			   search_buf + keep_len, read_addr,
			   nr_to_read) != nr_to_read)
	    {
	      warning (_("Unable to access target "
			 "memory at %s, halting search."),
		       hex_string (read_addr));
	      do_cleanups (old_cleanups);
	      return -1;
	    }

	  start_addr += chunk_size;
	}
    }

  /* Not found.  */

  do_cleanups (old_cleanups);
  return 0;
}
2794
2795 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2796 sequence of bytes in PATTERN with length PATTERN_LEN.
2797
2798 The result is 1 if found, 0 if not found, and -1 if there was an error
2799 requiring halting of the search (e.g. memory read error).
2800 If the pattern is found the address is recorded in FOUND_ADDRP. */
2801
2802 int
2803 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2804 const gdb_byte *pattern, ULONGEST pattern_len,
2805 CORE_ADDR *found_addrp)
2806 {
2807 struct target_ops *t;
2808 int found;
2809
2810 /* We don't use INHERIT to set current_target.to_search_memory,
2811 so we have to scan the target stack and handle targetdebug
2812 ourselves. */
2813
2814 if (targetdebug)
2815 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2816 hex_string (start_addr));
2817
2818 for (t = current_target.beneath; t != NULL; t = t->beneath)
2819 if (t->to_search_memory != NULL)
2820 break;
2821
2822 if (t != NULL)
2823 {
2824 found = t->to_search_memory (t, start_addr, search_space_len,
2825 pattern, pattern_len, found_addrp);
2826 }
2827 else
2828 {
2829 /* If a special version of to_search_memory isn't available, use the
2830 simple version. */
2831 found = simple_search_memory (current_target.beneath,
2832 start_addr, search_space_len,
2833 pattern, pattern_len, found_addrp);
2834 }
2835
2836 if (targetdebug)
2837 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2838
2839 return found;
2840 }
2841
/* Look through the currently pushed targets.  If none of them will
   be able to restart the currently running process, issue an error
   message.  */

void
target_require_runnable (void)
{
  struct target_ops *t;

  for (t = target_stack; t != NULL; t = t->beneath)
    {
      /* If this target knows how to create a new program, then
	 assume we will still be able to after killing the current
	 one.  Either killing and mourning will not pop T, or else
	 find_default_run_target will find it again.  */
      if (t->to_create_inferior != NULL)
	return;

      /* Do not worry about thread_stratum targets that can not
	 create inferiors.  Assume they will be pushed again if
	 necessary, and continue to the process_stratum.  */
      if (t->to_stratum == thread_stratum
	  || t->to_stratum == arch_stratum)
	continue;

      /* Anything else that cannot run is a hard error.  */
      error (_("The \"%s\" target does not support \"run\". "
	       "Try \"help target\" or \"continue\"."),
	     t->to_shortname);
    }

  /* This function is only called if the target is running.  In that
     case there should have been a process_stratum target and it
     should either know how to create inferiors, or not...  */
  internal_error (__FILE__, __LINE__, _("No targets found"));
}
2877
2878 /* Look through the list of possible targets for a target that can
2879 execute a run or attach command without any other data. This is
2880 used to locate the default process stratum.
2881
2882 If DO_MESG is not NULL, the result is always valid (error() is
2883 called for errors); else, return NULL on error. */
2884
2885 static struct target_ops *
2886 find_default_run_target (char *do_mesg)
2887 {
2888 struct target_ops **t;
2889 struct target_ops *runable = NULL;
2890 int count;
2891
2892 count = 0;
2893
2894 for (t = target_structs; t < target_structs + target_struct_size;
2895 ++t)
2896 {
2897 if ((*t)->to_can_run && target_can_run (*t))
2898 {
2899 runable = *t;
2900 ++count;
2901 }
2902 }
2903
2904 if (count != 1)
2905 {
2906 if (do_mesg)
2907 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2908 else
2909 return NULL;
2910 }
2911
2912 return runable;
2913 }
2914
2915 void
2916 find_default_attach (struct target_ops *ops, char *args, int from_tty)
2917 {
2918 struct target_ops *t;
2919
2920 t = find_default_run_target ("attach");
2921 (t->to_attach) (t, args, from_tty);
2922 return;
2923 }
2924
2925 void
2926 find_default_create_inferior (struct target_ops *ops,
2927 char *exec_file, char *allargs, char **env,
2928 int from_tty)
2929 {
2930 struct target_ops *t;
2931
2932 t = find_default_run_target ("run");
2933 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
2934 return;
2935 }
2936
2937 static int
2938 find_default_can_async_p (void)
2939 {
2940 struct target_ops *t;
2941
2942 /* This may be called before the target is pushed on the stack;
2943 look for the default process stratum. If there's none, gdb isn't
2944 configured with a native debugger, and target remote isn't
2945 connected yet. */
2946 t = find_default_run_target (NULL);
2947 if (t && t->to_can_async_p)
2948 return (t->to_can_async_p) ();
2949 return 0;
2950 }
2951
2952 static int
2953 find_default_is_async_p (void)
2954 {
2955 struct target_ops *t;
2956
2957 /* This may be called before the target is pushed on the stack;
2958 look for the default process stratum. If there's none, gdb isn't
2959 configured with a native debugger, and target remote isn't
2960 connected yet. */
2961 t = find_default_run_target (NULL);
2962 if (t && t->to_is_async_p)
2963 return (t->to_is_async_p) ();
2964 return 0;
2965 }
2966
2967 static int
2968 find_default_supports_non_stop (void)
2969 {
2970 struct target_ops *t;
2971
2972 t = find_default_run_target (NULL);
2973 if (t && t->to_supports_non_stop)
2974 return (t->to_supports_non_stop) ();
2975 return 0;
2976 }
2977
2978 int
2979 target_supports_non_stop (void)
2980 {
2981 struct target_ops *t;
2982
2983 for (t = &current_target; t != NULL; t = t->beneath)
2984 if (t->to_supports_non_stop)
2985 return t->to_supports_non_stop ();
2986
2987 return 0;
2988 }
2989
2990
2991 char *
2992 target_get_osdata (const char *type)
2993 {
2994 struct target_ops *t;
2995
2996 /* If we're already connected to something that can get us OS
2997 related data, use it. Otherwise, try using the native
2998 target. */
2999 if (current_target.to_stratum >= process_stratum)
3000 t = current_target.beneath;
3001 else
3002 t = find_default_run_target ("get OS data");
3003
3004 if (!t)
3005 return NULL;
3006
3007 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3008 }
3009
/* Determine the current address space of thread PTID.  */

struct address_space *
target_thread_address_space (ptid_t ptid)
{
  struct address_space *aspace;
  struct inferior *inf;
  struct target_ops *t;

  /* First let the targets on the stack answer.  */
  for (t = current_target.beneath; t != NULL; t = t->beneath)
    {
      if (t->to_thread_address_space != NULL)
	{
	  aspace = t->to_thread_address_space (t, ptid);
	  gdb_assert (aspace);

	  if (targetdebug)
	    fprintf_unfiltered (gdb_stdlog,
				"target_thread_address_space (%s) = %d\n",
				target_pid_to_str (ptid),
				address_space_num (aspace));
	  return aspace;
	}
    }

  /* Fall-back to the "main" address space of the inferior.  */
  inf = find_inferior_pid (ptid_get_pid (ptid));

  if (inf == NULL || inf->aspace == NULL)
    internal_error (__FILE__, __LINE__,
		    _("Can't determine the current "
		      "address space of thread %s\n"),
		    target_pid_to_str (ptid));

  return inf->aspace;
}
3046
/* Default to_region_ok_for_hw_watchpoint: accept a region no wider
   than a pointer on the current architecture.  */

static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
}
3052
3053 static int
3054 default_watchpoint_addr_within_range (struct target_ops *target,
3055 CORE_ADDR addr,
3056 CORE_ADDR start, int length)
3057 {
3058 return addr >= start && addr < start + length;
3059 }
3060
/* Default to_thread_architecture: every thread gets the global
   architecture, regardless of OPS and PTID.  */

static struct gdbarch *
default_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  return target_gdbarch;
}
3066
/* Stub returning 0; cast into various target-vector slots (see
   init_dummy_target).  */

static int
return_zero (void)
{
  return 0;
}
3072
/* Stub returning 1 (companion to return_zero); not referenced in this
   part of the file.  */

static int
return_one (void)
{
  return 1;
}
3078
/* Stub returning -1 (companion to return_zero); not referenced in
   this part of the file.  */

static int
return_minus_one (void)
{
  return -1;
}
3084
3085 /* Find a single runnable target in the stack and return it. If for
3086 some reason there is more than one, return NULL. */
3087
3088 struct target_ops *
3089 find_run_target (void)
3090 {
3091 struct target_ops **t;
3092 struct target_ops *runable = NULL;
3093 int count;
3094
3095 count = 0;
3096
3097 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3098 {
3099 if ((*t)->to_can_run && target_can_run (*t))
3100 {
3101 runable = *t;
3102 ++count;
3103 }
3104 }
3105
3106 return (count == 1 ? runable : NULL);
3107 }
3108
3109 /*
3110 * Find the next target down the stack from the specified target.
3111 */
3112
3113 struct target_ops *
3114 find_target_beneath (struct target_ops *t)
3115 {
3116 return t->beneath;
3117 }
3118
3119 \f
/* The inferior process has died.  Long live the inferior!  */

void
generic_mourn_inferior (void)
{
  ptid_t ptid;

  /* Clear inferior_ptid first so the cleanups below run with no
     current thread selected.  */
  ptid = inferior_ptid;
  inferior_ptid = null_ptid;

  if (!ptid_equal (ptid, null_ptid))
    {
      int pid = ptid_get_pid (ptid);
      exit_inferior (pid);
    }

  /* Reset per-run state: breakpoints, the register cache, the exec
     file and the frame cache.  */
  breakpoint_init_inferior (inf_exited);
  registers_changed ();

  reopen_exec_file ();
  reinit_frame_cache ();

  if (deprecated_detach_hook)
    deprecated_detach_hook ();
}
3145 \f
3146 /* Helper function for child_wait and the derivatives of child_wait.
3147 HOSTSTATUS is the waitstatus from wait() or the equivalent; store our
3148 translation of that in OURSTATUS. */
3149 void
3150 store_waitstatus (struct target_waitstatus *ourstatus, int hoststatus)
3151 {
3152 if (WIFEXITED (hoststatus))
3153 {
3154 ourstatus->kind = TARGET_WAITKIND_EXITED;
3155 ourstatus->value.integer = WEXITSTATUS (hoststatus);
3156 }
3157 else if (!WIFSTOPPED (hoststatus))
3158 {
3159 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3160 ourstatus->value.sig = target_signal_from_host (WTERMSIG (hoststatus));
3161 }
3162 else
3163 {
3164 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3165 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (hoststatus));
3166 }
3167 }
3168 \f
/* Convert a normal process ID to a string.  Returns the string in a
   static buffer.  */

char *
normal_pid_to_str (ptid_t ptid)
{
  /* Static storage: the result is only valid until the next call.  */
  static char buf[32];

  xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
  return buf;
}
3180
/* to_pid_to_str for the dummy target: the generic "process NNN"
   form.  */

static char *
dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
  return normal_pid_to_str (ptid);
}
3186
/* Error-catcher for target_find_memory_regions.  Reached when no
   target on the stack overrides the method; the return is never
   executed.  */
static int
dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
{
  error (_("Command not implemented for this target."));
  return 0;
}
3194
/* Error-catcher for target_make_corefile_notes.  Reached when no
   target on the stack overrides the method; the return is never
   executed.  */
static char *
dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
{
  error (_("Command not implemented for this target."));
  return NULL;
}
3202
/* Error-catcher for target_get_bookmark.  tcomplain reports the
   unsupported operation; the return is never executed.  */
static gdb_byte *
dummy_get_bookmark (char *ignore1, int ignore2)
{
  tcomplain ();
  return NULL;
}
3210
/* Error-catcher for target_goto_bookmark.  tcomplain reports the
   unsupported operation.  */
static void
dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
{
  tcomplain ();
}
3217
/* Set up the handful of non-empty slots needed by the dummy target
   vector.  The dummy target sits at dummy_stratum; most methods stay
   NULL, the "find default" helpers locate a real run target on
   demand, and several predicate slots are filled with cast stubs
   (return_zero) so they are always callable.  */

static void
init_dummy_target (void)
{
  dummy_target.to_shortname = "None";
  dummy_target.to_longname = "None";
  dummy_target.to_doc = "";
  dummy_target.to_attach = find_default_attach;
  /* Detaching from nothing is a no-op; cast target_ignore to the
     expected signature.  */
  dummy_target.to_detach =
    (void (*)(struct target_ops *, char *, int))target_ignore;
  dummy_target.to_create_inferior = find_default_create_inferior;
  dummy_target.to_can_async_p = find_default_can_async_p;
  dummy_target.to_is_async_p = find_default_is_async_p;
  dummy_target.to_supports_non_stop = find_default_supports_non_stop;
  dummy_target.to_pid_to_str = dummy_pid_to_str;
  dummy_target.to_stratum = dummy_stratum;
  dummy_target.to_find_memory_regions = dummy_find_memory_regions;
  dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
  dummy_target.to_get_bookmark = dummy_get_bookmark;
  dummy_target.to_goto_bookmark = dummy_goto_bookmark;
  dummy_target.to_xfer_partial = default_xfer_partial;
  /* All the "has ..." predicates answer no.  */
  dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
  dummy_target.to_has_execution
    = (int (*) (struct target_ops *, ptid_t)) return_zero;
  dummy_target.to_stopped_by_watchpoint = return_zero;
  dummy_target.to_stopped_data_address =
    (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
  dummy_target.to_magic = OPS_MAGIC;
}
3252 \f
/* Logging wrapper for to_open: forward to the real target, then write
   a trace line to gdb_stdlog.  */

static void
debug_to_open (char *args, int from_tty)
{
  debug_target.to_open (args, from_tty);

  /* NOTE(review): ARGS may be NULL for an argument-less command;
     %s with NULL is undefined behavior -- confirm callers.  */
  fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
}
3260
3261 void
3262 target_close (struct target_ops *targ, int quitting)
3263 {
3264 if (targ->to_xclose != NULL)
3265 targ->to_xclose (targ, quitting);
3266 else if (targ->to_close != NULL)
3267 targ->to_close (quitting);
3268
3269 if (targetdebug)
3270 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3271 }
3272
3273 void
3274 target_attach (char *args, int from_tty)
3275 {
3276 struct target_ops *t;
3277
3278 for (t = current_target.beneath; t != NULL; t = t->beneath)
3279 {
3280 if (t->to_attach != NULL)
3281 {
3282 t->to_attach (t, args, from_tty);
3283 if (targetdebug)
3284 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3285 args, from_tty);
3286 return;
3287 }
3288 }
3289
3290 internal_error (__FILE__, __LINE__,
3291 _("could not find a target to attach"));
3292 }
3293
3294 int
3295 target_thread_alive (ptid_t ptid)
3296 {
3297 struct target_ops *t;
3298
3299 for (t = current_target.beneath; t != NULL; t = t->beneath)
3300 {
3301 if (t->to_thread_alive != NULL)
3302 {
3303 int retval;
3304
3305 retval = t->to_thread_alive (t, ptid);
3306 if (targetdebug)
3307 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3308 PIDGET (ptid), retval);
3309
3310 return retval;
3311 }
3312 }
3313
3314 return 0;
3315 }
3316
3317 void
3318 target_find_new_threads (void)
3319 {
3320 struct target_ops *t;
3321
3322 for (t = current_target.beneath; t != NULL; t = t->beneath)
3323 {
3324 if (t->to_find_new_threads != NULL)
3325 {
3326 t->to_find_new_threads (t);
3327 if (targetdebug)
3328 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3329
3330 return;
3331 }
3332 }
3333 }
3334
3335 void
3336 target_stop (ptid_t ptid)
3337 {
3338 if (!may_stop)
3339 {
3340 warning (_("May not interrupt or stop the target, ignoring attempt"));
3341 return;
3342 }
3343
3344 (*current_target.to_stop) (ptid);
3345 }
3346
/* Logging wrapper for the post-attach target method.  */

static void
debug_to_post_attach (int pid)
{
  debug_target.to_post_attach (pid);

  fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
}
3354
3355 /* Return a pretty printed form of target_waitstatus.
3356 Space for the result is malloc'd, caller must free. */
3357
3358 char *
3359 target_waitstatus_to_string (const struct target_waitstatus *ws)
3360 {
3361 const char *kind_str = "status->kind = ";
3362
3363 switch (ws->kind)
3364 {
3365 case TARGET_WAITKIND_EXITED:
3366 return xstrprintf ("%sexited, status = %d",
3367 kind_str, ws->value.integer);
3368 case TARGET_WAITKIND_STOPPED:
3369 return xstrprintf ("%sstopped, signal = %s",
3370 kind_str, target_signal_to_name (ws->value.sig));
3371 case TARGET_WAITKIND_SIGNALLED:
3372 return xstrprintf ("%ssignalled, signal = %s",
3373 kind_str, target_signal_to_name (ws->value.sig));
3374 case TARGET_WAITKIND_LOADED:
3375 return xstrprintf ("%sloaded", kind_str);
3376 case TARGET_WAITKIND_FORKED:
3377 return xstrprintf ("%sforked", kind_str);
3378 case TARGET_WAITKIND_VFORKED:
3379 return xstrprintf ("%svforked", kind_str);
3380 case TARGET_WAITKIND_EXECD:
3381 return xstrprintf ("%sexecd", kind_str);
3382 case TARGET_WAITKIND_SYSCALL_ENTRY:
3383 return xstrprintf ("%sentered syscall", kind_str);
3384 case TARGET_WAITKIND_SYSCALL_RETURN:
3385 return xstrprintf ("%sexited syscall", kind_str);
3386 case TARGET_WAITKIND_SPURIOUS:
3387 return xstrprintf ("%sspurious", kind_str);
3388 case TARGET_WAITKIND_IGNORE:
3389 return xstrprintf ("%signore", kind_str);
3390 case TARGET_WAITKIND_NO_HISTORY:
3391 return xstrprintf ("%sno-history", kind_str);
3392 default:
3393 return xstrprintf ("%sunknown???", kind_str);
3394 }
3395 }
3396
/* Write one debug-log line for register REGNO of REGCACHE, prefixed by
   FUNC.  Shows the register's name when the architecture provides one
   (else the raw number), the raw bytes, and -- when the value fits in a
   LONGEST -- its hex and decimal interpretation in target byte order.  */

static void
debug_print_register (const char * func,
		      struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  fprintf_unfiltered (gdb_stdlog, "%s ", func);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
      && gdbarch_register_name (gdbarch, regno) != NULL
      && gdbarch_register_name (gdbarch, regno)[0] != '\0')
    fprintf_unfiltered (gdb_stdlog, "(%s)",
			gdbarch_register_name (gdbarch, regno));
  else
    fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
  if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int i, size = register_size (gdbarch, regno);
      unsigned char buf[MAX_REGISTER_SIZE];

      /* Dump the raw bytes in storage order.  */
      regcache_raw_collect (regcache, regno, buf);
      fprintf_unfiltered (gdb_stdlog, " = ");
      for (i = 0; i < size; i++)
	{
	  fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
	}
      if (size <= sizeof (LONGEST))
	{
	  ULONGEST val = extract_unsigned_integer (buf, size, byte_order);

	  fprintf_unfiltered (gdb_stdlog, " %s %s",
			      core_addr_to_string_nz (val), plongest (val));
	}
    }
  fprintf_unfiltered (gdb_stdlog, "\n");
}
3433
3434 void
3435 target_fetch_registers (struct regcache *regcache, int regno)
3436 {
3437 struct target_ops *t;
3438
3439 for (t = current_target.beneath; t != NULL; t = t->beneath)
3440 {
3441 if (t->to_fetch_registers != NULL)
3442 {
3443 t->to_fetch_registers (t, regcache, regno);
3444 if (targetdebug)
3445 debug_print_register ("target_fetch_registers", regcache, regno);
3446 return;
3447 }
3448 }
3449 }
3450
3451 void
3452 target_store_registers (struct regcache *regcache, int regno)
3453 {
3454 struct target_ops *t;
3455
3456 if (!may_write_registers)
3457 error (_("Writing to registers is not allowed (regno %d)"), regno);
3458
3459 for (t = current_target.beneath; t != NULL; t = t->beneath)
3460 {
3461 if (t->to_store_registers != NULL)
3462 {
3463 t->to_store_registers (t, regcache, regno);
3464 if (targetdebug)
3465 {
3466 debug_print_register ("target_store_registers", regcache, regno);
3467 }
3468 return;
3469 }
3470 }
3471
3472 noprocess ();
3473 }
3474
3475 int
3476 target_core_of_thread (ptid_t ptid)
3477 {
3478 struct target_ops *t;
3479
3480 for (t = current_target.beneath; t != NULL; t = t->beneath)
3481 {
3482 if (t->to_core_of_thread != NULL)
3483 {
3484 int retval = t->to_core_of_thread (t, ptid);
3485
3486 if (targetdebug)
3487 fprintf_unfiltered (gdb_stdlog,
3488 "target_core_of_thread (%d) = %d\n",
3489 PIDGET (ptid), retval);
3490 return retval;
3491 }
3492 }
3493
3494 return -1;
3495 }
3496
3497 int
3498 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3499 {
3500 struct target_ops *t;
3501
3502 for (t = current_target.beneath; t != NULL; t = t->beneath)
3503 {
3504 if (t->to_verify_memory != NULL)
3505 {
3506 int retval = t->to_verify_memory (t, data, memaddr, size);
3507
3508 if (targetdebug)
3509 fprintf_unfiltered (gdb_stdlog,
3510 "target_verify_memory (%s, %s) = %d\n",
3511 paddress (target_gdbarch, memaddr),
3512 pulongest (size),
3513 retval);
3514 return retval;
3515 }
3516 }
3517
3518 tcomplain ();
3519 }
3520
3521 /* The documentation for this function is in its prototype declaration
3522 in target.h. */
3523
3524 int
3525 target_ranged_break_num_registers (void)
3526 {
3527 struct target_ops *t;
3528
3529 for (t = current_target.beneath; t != NULL; t = t->beneath)
3530 if (t->to_ranged_break_num_registers != NULL)
3531 return t->to_ranged_break_num_registers (t);
3532
3533 return -1;
3534 }
3535
/* Logging wrapper for the prepare-to-store target method.  */

static void
debug_to_prepare_to_store (struct regcache *regcache)
{
  debug_target.to_prepare_to_store (regcache);

  fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
}
3543
/* Logging wrapper for the deprecated memory-transfer method: forward
   to the real target, then trace the call, appending a dump of the
   bytes that were moved.  At targetdebug < 2 the dump stops after the
   first line; higher levels dump everything.  */

static int
deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
			      int write, struct mem_attrib *attrib,
			      struct target_ops *target)
{
  int retval;

  retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
						attrib, target);

  /* No trailing newline yet -- the byte dump continues the line.  */
  fprintf_unfiltered (gdb_stdlog,
		      "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
		      paddress (target_gdbarch, memaddr), len,
		      write ? "write" : "read", retval);

  if (retval > 0)
    {
      int i;

      fputs_unfiltered (", bytes =", gdb_stdlog);
      for (i = 0; i < retval; i++)
	{
	  /* Break the line whenever the *host* buffer address is
	     16-byte aligned (a layout trick, not tied to MEMADDR).  */
	  if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
	    {
	      if (targetdebug < 2 && i > 0)
		{
		  fprintf_unfiltered (gdb_stdlog, " ...");
		  break;
		}
	      fprintf_unfiltered (gdb_stdlog, "\n");
	    }

	  fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
	}
    }

  fputc_unfiltered ('\n', gdb_stdlog);

  return retval;
}
3584
/* Logging wrapper for the files-info target method.  */

static void
debug_to_files_info (struct target_ops *target)
{
  debug_target.to_files_info (target);

  fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
}
3592
3593 static int
3594 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
3595 struct bp_target_info *bp_tgt)
3596 {
3597 int retval;
3598
3599 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
3600
3601 fprintf_unfiltered (gdb_stdlog,
3602 "target_insert_breakpoint (%s, xxx) = %ld\n",
3603 core_addr_to_string (bp_tgt->placed_address),
3604 (unsigned long) retval);
3605 return retval;
3606 }
3607
3608 static int
3609 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
3610 struct bp_target_info *bp_tgt)
3611 {
3612 int retval;
3613
3614 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
3615
3616 fprintf_unfiltered (gdb_stdlog,
3617 "target_remove_breakpoint (%s, xxx) = %ld\n",
3618 core_addr_to_string (bp_tgt->placed_address),
3619 (unsigned long) retval);
3620 return retval;
3621 }
3622
/* Logging wrapper for to_can_use_hw_breakpoint.  The int arguments and
   result are cast to unsigned long solely for the log's %ld
   conversions.  */

static int
debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
{
  int retval;

  retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);

  fprintf_unfiltered (gdb_stdlog,
		      "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
		      (unsigned long) type,
		      (unsigned long) cnt,
		      (unsigned long) from_tty,
		      (unsigned long) retval);
  return retval;
}
3638
3639 static int
3640 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3641 {
3642 CORE_ADDR retval;
3643
3644 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
3645
3646 fprintf_unfiltered (gdb_stdlog,
3647 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
3648 core_addr_to_string (addr), (unsigned long) len,
3649 core_addr_to_string (retval));
3650 return retval;
3651 }
3652
3653 static int
3654 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
3655 struct expression *cond)
3656 {
3657 int retval;
3658
3659 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
3660 rw, cond);
3661
3662 fprintf_unfiltered (gdb_stdlog,
3663 "target_can_accel_watchpoint_condition "
3664 "(%s, %d, %d, %s) = %ld\n",
3665 core_addr_to_string (addr), len, rw,
3666 host_address_to_string (cond), (unsigned long) retval);
3667 return retval;
3668 }
3669
3670 static int
3671 debug_to_stopped_by_watchpoint (void)
3672 {
3673 int retval;
3674
3675 retval = debug_target.to_stopped_by_watchpoint ();
3676
3677 fprintf_unfiltered (gdb_stdlog,
3678 "target_stopped_by_watchpoint () = %ld\n",
3679 (unsigned long) retval);
3680 return retval;
3681 }
3682
/* Logging wrapper for to_stopped_data_address.  NOTE(review): *ADDR is
   logged unconditionally, but nothing here shows the callee sets it on
   a zero return -- on failure the logged address may be whatever the
   caller passed in; confirm against the method's contract.  */

static int
debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
{
  int retval;

  retval = debug_target.to_stopped_data_address (target, addr);

  fprintf_unfiltered (gdb_stdlog,
		      "target_stopped_data_address ([%s]) = %ld\n",
		      core_addr_to_string (*addr),
		      (unsigned long)retval);
  return retval;
}
3696
3697 static int
3698 debug_to_watchpoint_addr_within_range (struct target_ops *target,
3699 CORE_ADDR addr,
3700 CORE_ADDR start, int length)
3701 {
3702 int retval;
3703
3704 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
3705 start, length);
3706
3707 fprintf_filtered (gdb_stdlog,
3708 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
3709 core_addr_to_string (addr), core_addr_to_string (start),
3710 length, retval);
3711 return retval;
3712 }
3713
3714 static int
3715 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
3716 struct bp_target_info *bp_tgt)
3717 {
3718 int retval;
3719
3720 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
3721
3722 fprintf_unfiltered (gdb_stdlog,
3723 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
3724 core_addr_to_string (bp_tgt->placed_address),
3725 (unsigned long) retval);
3726 return retval;
3727 }
3728
3729 static int
3730 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
3731 struct bp_target_info *bp_tgt)
3732 {
3733 int retval;
3734
3735 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
3736
3737 fprintf_unfiltered (gdb_stdlog,
3738 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
3739 core_addr_to_string (bp_tgt->placed_address),
3740 (unsigned long) retval);
3741 return retval;
3742 }
3743
3744 static int
3745 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
3746 struct expression *cond)
3747 {
3748 int retval;
3749
3750 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
3751
3752 fprintf_unfiltered (gdb_stdlog,
3753 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
3754 core_addr_to_string (addr), len, type,
3755 host_address_to_string (cond), (unsigned long) retval);
3756 return retval;
3757 }
3758
3759 static int
3760 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
3761 struct expression *cond)
3762 {
3763 int retval;
3764
3765 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
3766
3767 fprintf_unfiltered (gdb_stdlog,
3768 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
3769 core_addr_to_string (addr), len, type,
3770 host_address_to_string (cond), (unsigned long) retval);
3771 return retval;
3772 }
3773
/* Logging wrapper for the terminal-init target method.  */

static void
debug_to_terminal_init (void)
{
  debug_target.to_terminal_init ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
}
3781
/* Logging wrapper for the terminal-inferior target method.  */

static void
debug_to_terminal_inferior (void)
{
  debug_target.to_terminal_inferior ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
}
3789
/* Logging wrapper for the terminal-ours-for-output target method.  */

static void
debug_to_terminal_ours_for_output (void)
{
  debug_target.to_terminal_ours_for_output ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
}
3797
/* Logging wrapper for the terminal-ours target method.  */

static void
debug_to_terminal_ours (void)
{
  debug_target.to_terminal_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
}
3805
/* Logging wrapper for the terminal-save-ours target method.  */

static void
debug_to_terminal_save_ours (void)
{
  debug_target.to_terminal_save_ours ();

  fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
}
3813
/* Logging wrapper for the terminal-info target method.  NOTE(review):
   ARG may be NULL when the command had no argument; %s of NULL is
   host-dependent -- confirm before porting.  */

static void
debug_to_terminal_info (char *arg, int from_tty)
{
  debug_target.to_terminal_info (arg, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
		      from_tty);
}
3822
/* Logging wrapper for the load target method.  */

static void
debug_to_load (char *args, int from_tty)
{
  debug_target.to_load (args, from_tty);

  fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
}
3830
/* Logging wrapper for the post-startup-inferior target method.  */

static void
debug_to_post_startup_inferior (ptid_t ptid)
{
  debug_target.to_post_startup_inferior (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
		      PIDGET (ptid));
}
3839
3840 static int
3841 debug_to_insert_fork_catchpoint (int pid)
3842 {
3843 int retval;
3844
3845 retval = debug_target.to_insert_fork_catchpoint (pid);
3846
3847 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
3848 pid, retval);
3849
3850 return retval;
3851 }
3852
3853 static int
3854 debug_to_remove_fork_catchpoint (int pid)
3855 {
3856 int retval;
3857
3858 retval = debug_target.to_remove_fork_catchpoint (pid);
3859
3860 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
3861 pid, retval);
3862
3863 return retval;
3864 }
3865
3866 static int
3867 debug_to_insert_vfork_catchpoint (int pid)
3868 {
3869 int retval;
3870
3871 retval = debug_target.to_insert_vfork_catchpoint (pid);
3872
3873 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
3874 pid, retval);
3875
3876 return retval;
3877 }
3878
3879 static int
3880 debug_to_remove_vfork_catchpoint (int pid)
3881 {
3882 int retval;
3883
3884 retval = debug_target.to_remove_vfork_catchpoint (pid);
3885
3886 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
3887 pid, retval);
3888
3889 return retval;
3890 }
3891
3892 static int
3893 debug_to_insert_exec_catchpoint (int pid)
3894 {
3895 int retval;
3896
3897 retval = debug_target.to_insert_exec_catchpoint (pid);
3898
3899 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
3900 pid, retval);
3901
3902 return retval;
3903 }
3904
3905 static int
3906 debug_to_remove_exec_catchpoint (int pid)
3907 {
3908 int retval;
3909
3910 retval = debug_target.to_remove_exec_catchpoint (pid);
3911
3912 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
3913 pid, retval);
3914
3915 return retval;
3916 }
3917
3918 static int
3919 debug_to_has_exited (int pid, int wait_status, int *exit_status)
3920 {
3921 int has_exited;
3922
3923 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
3924
3925 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
3926 pid, wait_status, *exit_status, has_exited);
3927
3928 return has_exited;
3929 }
3930
3931 static int
3932 debug_to_can_run (void)
3933 {
3934 int retval;
3935
3936 retval = debug_target.to_can_run ();
3937
3938 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
3939
3940 return retval;
3941 }
3942
3943 static struct gdbarch *
3944 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
3945 {
3946 struct gdbarch *retval;
3947
3948 retval = debug_target.to_thread_architecture (ops, ptid);
3949
3950 fprintf_unfiltered (gdb_stdlog,
3951 "target_thread_architecture (%s) = %s [%s]\n",
3952 target_pid_to_str (ptid),
3953 host_address_to_string (retval),
3954 gdbarch_bfd_arch_info (retval)->printable_name);
3955 return retval;
3956 }
3957
/* Logging wrapper for the stop target method.  */

static void
debug_to_stop (ptid_t ptid)
{
  debug_target.to_stop (ptid);

  fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
		      target_pid_to_str (ptid));
}
3966
/* Logging wrapper for the rcmd (remote monitor command) target
   method.  */

static void
debug_to_rcmd (char *command,
	       struct ui_file *outbuf)
{
  debug_target.to_rcmd (command, outbuf);
  fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
}
3974
3975 static char *
3976 debug_to_pid_to_exec_file (int pid)
3977 {
3978 char *exec_file;
3979
3980 exec_file = debug_target.to_pid_to_exec_file (pid);
3981
3982 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
3983 pid, exec_file);
3984
3985 return exec_file;
3986 }
3987
/* Install the target-debugging wrappers: snapshot the current target
   vector into debug_target, then point the intercepted methods of
   current_target at the debug_to_* wrappers above, which forward to
   the snapshot and trace each call to gdb_stdlog.  */

static void
setup_target_debug (void)
{
  /* Keep the real methods so the wrappers can still call them.  */
  memcpy (&debug_target, &current_target, sizeof debug_target);

  current_target.to_open = debug_to_open;
  current_target.to_post_attach = debug_to_post_attach;
  current_target.to_prepare_to_store = debug_to_prepare_to_store;
  current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
  current_target.to_files_info = debug_to_files_info;
  current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
  current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
  current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
  current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
  current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
  current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
  current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
  current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
  current_target.to_stopped_data_address = debug_to_stopped_data_address;
  current_target.to_watchpoint_addr_within_range
    = debug_to_watchpoint_addr_within_range;
  current_target.to_region_ok_for_hw_watchpoint
    = debug_to_region_ok_for_hw_watchpoint;
  current_target.to_can_accel_watchpoint_condition
    = debug_to_can_accel_watchpoint_condition;
  current_target.to_terminal_init = debug_to_terminal_init;
  current_target.to_terminal_inferior = debug_to_terminal_inferior;
  current_target.to_terminal_ours_for_output
    = debug_to_terminal_ours_for_output;
  current_target.to_terminal_ours = debug_to_terminal_ours;
  current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
  current_target.to_terminal_info = debug_to_terminal_info;
  current_target.to_load = debug_to_load;
  current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
  current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
  current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
  current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
  current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
  current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
  current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
  current_target.to_has_exited = debug_to_has_exited;
  current_target.to_can_run = debug_to_can_run;
  current_target.to_stop = debug_to_stop;
  current_target.to_rcmd = debug_to_rcmd;
  current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
  current_target.to_thread_architecture = debug_to_thread_architecture;
}
4035 \f
4036
/* Help text shared by the "info target" and "info files" commands.  */
static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4041
/* Implement the "monitor" command: forward CMD to the remote monitor
   via target_rcmd.  Error out when rcmd is unimplemented -- i.e. the
   to_rcmd slot still holds the tcomplain placeholder, either directly
   or (when target debugging is active) behind the debug wrapper.  */

static void
do_monitor_command (char *cmd,
		    int from_tty)
{
  if ((current_target.to_rcmd
       == (void (*) (char *, struct ui_file *)) tcomplain)
      || (current_target.to_rcmd == debug_to_rcmd
	  && (debug_target.to_rcmd
	      == (void (*) (char *, struct ui_file *)) tcomplain)))
    error (_("\"monitor\" command not supported by this target."));
  target_rcmd (cmd, gdb_stdtarg);
}
4054
4055 /* Print the name of each layers of our target stack. */
4056
4057 static void
4058 maintenance_print_target_stack (char *cmd, int from_tty)
4059 {
4060 struct target_ops *t;
4061
4062 printf_filtered (_("The current target stack is:\n"));
4063
4064 for (t = target_stack; t != NULL; t = t->beneath)
4065 {
4066 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4067 }
4068 }
4069
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated (the comment
   previously referred to "linux_nat_async_permitted", a stale name).  */
static int target_async_permitted_1 = 0;
4076
/* "set target-async" handler: commit the user's requested value,
   unless an inferior is live, in which case revert the user-visible
   setting and refuse the change.  */

static void
set_maintenance_target_async_permitted (char *args, int from_tty,
					struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo what the "set" machinery wrote before erroring out.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}
4089
/* "show target-async" handler.  */

static void
show_maintenance_target_async_permitted (struct ui_file *file, int from_tty,
					 struct cmd_list_element *c,
					 const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in "
		      "asynchronous mode is %s.\n"), value);
}
4099
/* Temporary copies of permission settings.  The "set" commands write
   here; the real may_* variables are updated only when the change is
   accepted (see set_target_permissions below).  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
4108
/* Make the user-set values match the real values again.  Used to back
   out a rejected "set may-*" change.  */

void
update_target_permissions (void)
{
  may_write_registers_1 = may_write_registers;
  may_write_memory_1 = may_write_memory;
  may_insert_breakpoints_1 = may_insert_breakpoints;
  may_insert_tracepoints_1 = may_insert_tracepoints;
  may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
  may_stop_1 = may_stop;
}
4121
/* The one function handles (most of) the permission flags in the same
   way.  may_write_memory is deliberately not handled here -- it has
   its own handler (set_write_memory_permission) so it can change
   independently of observer mode.  */

static void
set_target_permissions (char *args, int from_tty,
			struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the user-visible settings and refuse the change.  */
      update_target_permissions ();
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Make the real values match the user-changed values.  */
  may_write_registers = may_write_registers_1;
  may_insert_breakpoints = may_insert_breakpoints_1;
  may_insert_tracepoints = may_insert_tracepoints_1;
  may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
  may_stop = may_stop_1;
  update_observer_mode ();
}
4143
/* Set memory write permission independently of observer mode.  */

static void
set_write_memory_permission (char *args, int from_tty,
			     struct cmd_list_element *c)
{
  /* Make the real values match the user-changed values.  */
  may_write_memory = may_write_memory_1;
  update_observer_mode ();
}
4154
4155
/* One-time startup initialization of the target layer: push the dummy
   target as the bottom of the stack, register the "info target"/"info
   files" commands, the "monitor" command, the target-stack maintenance
   command, and the target-related set/show knobs, and create the
   target data cache.  */

void
initialize_targets (void)
{
  init_dummy_target ();
  push_target (&dummy_target);

  add_info ("target", target_info, targ_desc);
  add_info ("files", target_info, targ_desc);

  add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
Set target debugging."), _("\
Show target debugging."), _("\
When non-zero, target debugging is enabled.  Higher numbers are more\n\
verbose.  Changes do not take effect until the next \"run\" or \"target\"\n\
command."),
			    NULL,
			    show_targetdebug,
			    &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
			   &trust_readonly, _("\
Set mode for reading from readonly sections."), _("\
Show mode for reading from readonly sections."), _("\
When this mode is on, memory reads from readonly sections (such as .text)\n\
will be read from the object file instead of from the target.  This will\n\
result in significant performance improvement for remote targets."),
			   NULL,
			   show_trust_readonly,
			   &setlist, &showlist);

  add_com ("monitor", class_obscure, do_monitor_command,
	   _("Send a command to the remote monitor (remote targets only)."));

  add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
	   _("Print the name of each layer of the internal target stack."),
	   &maintenanceprintlist);

  add_setshow_boolean_cmd ("target-async", no_class,
			   &target_async_permitted_1, _("\
Set whether gdb controls the inferior in asynchronous mode."), _("\
Show whether gdb controls the inferior in asynchronous mode."), _("\
Tells gdb whether to control the inferior in asynchronous mode."),
			   set_maintenance_target_async_permitted,
			   show_maintenance_target_async_permitted,
			   &setlist,
			   &showlist);

  add_setshow_boolean_cmd ("stack-cache", class_support,
			   &stack_cache_enabled_p_1, _("\
Set cache use for stack access."), _("\
Show cache use for stack access."), _("\
When on, use the data cache for all stack access, regardless of any\n\
configured memory regions.  This improves remote performance significantly.\n\
By default, caching for stack access is on."),
			   set_stack_cache_enabled_p,
			   show_stack_cache_enabled_p,
			   &setlist, &showlist);

  /* The "may-*" permission knobs all funnel through
     set_target_permissions, except may-write-memory (see above).  */
  add_setshow_boolean_cmd ("may-write-registers", class_support,
			   &may_write_registers_1, _("\
Set permission to write into registers."), _("\
Show permission to write into registers."), _("\
When this permission is on, GDB may write into the target's registers.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-write-memory", class_support,
			   &may_write_memory_1, _("\
Set permission to write into target memory."), _("\
Show permission to write into target memory."), _("\
When this permission is on, GDB may write into the target's memory.\n\
Otherwise, any sort of write attempt will result in an error."),
			   set_write_memory_permission, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
			   &may_insert_breakpoints_1, _("\
Set permission to insert breakpoints in the target."), _("\
Show permission to insert breakpoints in the target."), _("\
When this permission is on, GDB may insert breakpoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
			   &may_insert_tracepoints_1, _("\
Set permission to insert tracepoints in the target."), _("\
Show permission to insert tracepoints in the target."), _("\
When this permission is on, GDB may insert tracepoints in the program.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
			   &may_insert_fast_tracepoints_1, _("\
Set permission to insert fast tracepoints in the target."), _("\
Show permission to insert fast tracepoints in the target."), _("\
When this permission is on, GDB may insert fast tracepoints.\n\
Otherwise, any sort of insertion attempt will result in an error."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("may-interrupt", class_support,
			   &may_stop_1, _("\
Set permission to interrupt or signal the target."), _("\
Show permission to interrupt or signal the target."), _("\
When this permission is on, GDB may interrupt/stop the target's execution.\n\
Otherwise, any attempt to interrupt or stop will be ignored."),
			   set_target_permissions, NULL,
			   &setlist, &showlist);


  target_dcache = dcache_init ();
}
This page took 0.11367 seconds and 5 git commands to generate.